Merge branch 'upstream-fixes' into upstream
author Jeff Garzik <jeff@garzik.org>
Tue, 12 Sep 2006 16:03:54 +0000 (12:03 -0400)
committer Jeff Garzik <jeff@garzik.org>
Tue, 12 Sep 2006 16:03:54 +0000 (12:03 -0400)
174 files changed:
Documentation/networking/LICENSE.qla3xxx [new file with mode: 0644]
MAINTAINERS
drivers/isdn/i4l/Kconfig
drivers/net/3c501.c
drivers/net/3c59x.c
drivers/net/8139cp.c
drivers/net/8139too.c
drivers/net/8390.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/acenic.c
drivers/net/amd8111e.c
drivers/net/arcnet/com20020-pci.c
drivers/net/ariadne.c
drivers/net/at1700.c
drivers/net/atarilance.c
drivers/net/au1000_eth.c
drivers/net/b44.c
drivers/net/bnx2.c
drivers/net/cassini.c
drivers/net/chelsio/cxgb2.c
drivers/net/defxx.c
drivers/net/dl2k.c
drivers/net/e100.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000/e1000_param.c
drivers/net/eepro.c
drivers/net/eepro100.c
drivers/net/epic100.c
drivers/net/fealnx.c
drivers/net/fec.c
drivers/net/forcedeth.c
drivers/net/gianfar.c
drivers/net/hamachi.c
drivers/net/hp100.c
drivers/net/ioc3-eth.c
drivers/net/irda/mcs7780.c
drivers/net/irda/w83977af_ir.c
drivers/net/ixgb/ixgb.h
drivers/net/ixgb/ixgb_ethtool.c
drivers/net/ixgb/ixgb_hw.c
drivers/net/ixgb/ixgb_ids.h
drivers/net/ixgb/ixgb_main.c
drivers/net/lance.c
drivers/net/myri10ge/myri10ge.c
drivers/net/myri10ge/myri10ge_mcp.h
drivers/net/natsemi.c
drivers/net/ne2k-pci.c
drivers/net/netx-eth.c
drivers/net/ns83820.c
drivers/net/pci-skeleton.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/pcnet32.c
drivers/net/phy/smsc.c
drivers/net/phy/vitesse.c
drivers/net/qla3xxx.c [new file with mode: 0644]
drivers/net/qla3xxx.h [new file with mode: 0644]
drivers/net/r8169.c
drivers/net/rrunner.c
drivers/net/s2io.c
drivers/net/saa9730.c
drivers/net/sb1250-mac.c
drivers/net/sis190.c
drivers/net/sis900.c
drivers/net/sk98lin/skge.c
drivers/net/skfp/skfddi.c
drivers/net/skge.c
drivers/net/skge.h
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/slhc.c
drivers/net/smc911x.c
drivers/net/starfire.c
drivers/net/sun3lance.c
drivers/net/sundance.c
drivers/net/sungem.c
drivers/net/tc35815.c
drivers/net/tg3.c
drivers/net/tokenring/3c359.c
drivers/net/tokenring/lanstreamer.c
drivers/net/tulip/21142.c
drivers/net/tulip/de2104x.c
drivers/net/tulip/de4x5.c
drivers/net/tulip/dmfe.c
drivers/net/tulip/eeprom.c
drivers/net/tulip/interrupt.c
drivers/net/tulip/media.c
drivers/net/tulip/pnic.c
drivers/net/tulip/pnic2.c
drivers/net/tulip/timer.c
drivers/net/tulip/tulip.h
drivers/net/tulip/tulip_core.c
drivers/net/tulip/uli526x.c
drivers/net/tulip/winbond-840.c
drivers/net/tulip/xircom_tulip_cb.c
drivers/net/typhoon.c
drivers/net/ucc_geth.c
drivers/net/via-rhine.c
drivers/net/via-velocity.c
drivers/net/via-velocity.h
drivers/net/wan/cycx_main.c
drivers/net/wan/dlci.c
drivers/net/wan/dscc4.c
drivers/net/wan/farsync.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wan/pc300_drv.c
drivers/net/wan/pci200syn.c
drivers/net/wan/sdla.c
drivers/net/wan/wanxl.c
drivers/net/wireless/Kconfig
drivers/net/wireless/airo.c
drivers/net/wireless/atmel_pci.c
drivers/net/wireless/bcm43xx/bcm43xx.h
drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
drivers/net/wireless/bcm43xx/bcm43xx_dma.c
drivers/net/wireless/bcm43xx/bcm43xx_dma.h
drivers/net/wireless/bcm43xx/bcm43xx_leds.c
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/bcm43xx/bcm43xx_main.h
drivers/net/wireless/bcm43xx/bcm43xx_phy.c
drivers/net/wireless/bcm43xx/bcm43xx_pio.c
drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
drivers/net/wireless/bcm43xx/bcm43xx_wx.c
drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2200.c
drivers/net/wireless/ipw2200.h
drivers/net/wireless/orinoco.c
drivers/net/wireless/orinoco.h
drivers/net/wireless/orinoco_nortel.c
drivers/net/wireless/orinoco_pci.c
drivers/net/wireless/orinoco_plx.c
drivers/net/wireless/orinoco_tmd.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/isl_ioctl.h
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/islpci_dev.h
drivers/net/wireless/prism54/islpci_hotplug.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/zd1211rw/Makefile
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/net/wireless/zd1211rw/zd_chip.h
drivers/net/wireless/zd1211rw/zd_def.h
drivers/net/wireless/zd1211rw/zd_ieee80211.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_mac.h
drivers/net/wireless/zd1211rw/zd_netdev.c
drivers/net/wireless/zd1211rw/zd_rf.c
drivers/net/wireless/zd1211rw/zd_rf.h
drivers/net/wireless/zd1211rw/zd_rf_al2230.c
drivers/net/wireless/zd1211rw/zd_rf_al7230b.c [new file with mode: 0644]
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/wireless/zd1211rw/zd_usb.h
drivers/net/yellowfin.c
include/net/ieee80211.h
include/net/ieee80211softmac.h
net/ieee80211/ieee80211_crypt_ccmp.c
net/ieee80211/ieee80211_crypt_tkip.c
net/ieee80211/ieee80211_crypt_wep.c
net/ieee80211/ieee80211_rx.c
net/ieee80211/ieee80211_tx.c
net/ieee80211/softmac/ieee80211softmac_assoc.c
net/ieee80211/softmac/ieee80211softmac_io.c
net/ieee80211/softmac/ieee80211softmac_module.c
net/ieee80211/softmac/ieee80211softmac_priv.h

diff --git a/Documentation/networking/LICENSE.qla3xxx b/Documentation/networking/LICENSE.qla3xxx
new file mode 100644
index 0000000..2f2077e
--- /dev/null
@@ -0,0 +1,46 @@
+Copyright (c)  2003-2006 QLogic Corporation
+QLogic Linux Networking HBA Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License as published by the Free Software
+Foundation (version 2 or a later version).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+       1. Redistribution of source code (only if applicable),
+          must retain the above copyright notice, this list of
+          conditions and the following disclaimer.
+
+       2. Redistribution in binary form must reproduce the above
+          copyright notice, this list of conditions and the
+          following disclaimer in the documentation and/or other
+          materials provided with the distribution.
+
+       3. The name of QLogic Corporation may not be used to
+          endorse or promote products derived from this software
+          without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
index 25cd707..d13f985 100644
@@ -449,9 +449,9 @@ L:  linux-hams@vger.kernel.org
 W:     http://www.baycom.org/~tom/ham/ham.html
 S:     Maintained
 
-BCM43XX WIRELESS DRIVER
-P:     Michael Buesch
-M:     mb@bu3sch.de
+BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION)
+P:     Larry Finger
+M:     Larry.Finger@lwfinger.net
 P:     Stefano Brivio
 M:     st3@riseup.net
 W:     http://bcm43xx.berlios.de/
@@ -2366,6 +2366,12 @@ M:       linux-driver@qlogic.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 
+QLOGIC QLA3XXX NETWORK DRIVER
+P:     Ron Mercer
+M:     linux-driver@qlogic.com
+L:     netdev@vger.kernel.org
+S:     Supported
+
 QNX4 FILESYSTEM
 P:     Anders Larsen
 M:     al@alarsen.net
@@ -2616,6 +2622,18 @@ P:       Nicolas Pitre
 M:     nico@cam.org
 S:     Maintained
 
+SOFTMAC LAYER (IEEE 802.11)
+P:     Johannes Berg
+M:     johannes@sipsolutions.net
+P:     Joe Jezak
+M:     josejx@gentoo.org
+P:     Daniel Drake
+M:     dsd@gentoo.org
+W:     http://softmac.sipsolutions.net/
+L:     softmac-dev@sipsolutions.net
+L:     netdev@vger.kernel.org
+S:     Maintained
+
 SOFTWARE RAID (Multiple Disks) SUPPORT
 P:     Ingo Molnar
 M:     mingo@redhat.com
@@ -2889,8 +2907,8 @@ W:        http://www.auk.cx/tms380tr/
 S:     Maintained
 
 TULIP NETWORK DRIVER
-P:     Jeff Garzik
-M:     jgarzik@pobox.com
+P:     Valerie Henson
+M:     val_henson@linux.intel.com
 L:     tulip-users@lists.sourceforge.net
 W:     http://sourceforge.net/projects/tulip/
 S:     Maintained
@@ -3341,6 +3359,15 @@ W:       http://www.qsl.net/dl1bke/
 L:     linux-hams@vger.kernel.org
 S:     Maintained
 
+ZD1211RW WIRELESS DRIVER
+P:     Daniel Drake
+M:     dsd@gentoo.org
+P:     Ulrich Kunitz
+M:     kune@deine-taler.de
+W:     http://zd1211.ath.cx/wiki/DriverRewrite
+L:     zd1211-devs@lists.sourceforge.net (subscribers-only)
+S:     Maintained
+
 ZF MACHZ WATCHDOG
 P:     Fernando Fuganti
 M:     fuganti@netbank.com.br
index a4f7288..3ef567b 100644
@@ -5,6 +5,7 @@
 config ISDN_PPP
        bool "Support synchronous PPP"
        depends on INET
+       select SLHC
        help
          Over digital connections such as ISDN, there is no need to
          synchronize sender and recipient's clocks with start and stop bits
index 07136ec..d7b115a 100644
@@ -120,7 +120,6 @@ static const char version[] =
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/config.h>      /* for CONFIG_IP_MULTICAST */
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
index 80e8ca0..415d081 100644
@@ -2928,7 +2928,7 @@ static void set_rx_mode(struct net_device *dev)
        int new_mode;
 
        if (dev->flags & IFF_PROMISC) {
-               if (vortex_debug > 0)
+               if (vortex_debug > 3)
                        printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
                new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
        } else  if ((dev->mc_list)  ||  (dev->flags & IFF_ALLMULTI)) {
@@ -3169,7 +3169,7 @@ static int __init vortex_init(void)
 {
        int pci_rc, eisa_rc;
 
-       pci_rc = pci_module_init(&vortex_driver);
+       pci_rc = pci_register_driver(&vortex_driver);
        eisa_rc = vortex_eisa_init();
 
        if (pci_rc == 0)
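
The hunk above is the first of many identical conversions in this merge: the
deprecated pci_module_init() wrapper is replaced with a direct call to
pci_register_driver(), which returns 0 on success or a negative errno. A
minimal sketch of the registration pattern the converted drivers share, using
hypothetical example_* names and PCI IDs rather than code from any driver in
this tree:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Hypothetical vendor/device IDs, for illustration only. */
    static const struct pci_device_id example_pci_tbl[] = {
            { PCI_DEVICE(0x1234, 0x5678) },
            { }     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
    {
            return pci_enable_device(pdev);
    }

    static void example_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    static struct pci_driver example_driver = {
            .name     = "example",
            .id_table = example_pci_tbl,
            .probe    = example_probe,
            .remove   = example_remove,
    };

    static int __init example_init(void)
    {
            /* pci_register_driver() reports 0 or a negative errno. */
            return pci_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
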
index 1428bb7..4cdb6b2 100644
@@ -48,7 +48,7 @@
  */
 
 #define DRV_NAME               "8139cp"
-#define DRV_VERSION            "1.2"
+#define DRV_VERSION            "1.3"
 #define DRV_RELDATE            "Mar 22, 2004"
 
 
@@ -942,8 +942,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
-               printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                       dev->name);
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
@@ -2098,7 +2096,7 @@ static int __init cp_init (void)
 #ifdef MODULE
        printk("%s", version);
 #endif
-       return pci_module_init (&cp_driver);
+       return pci_register_driver(&cp_driver);
 }
 
 static void __exit cp_exit (void)
index e4f4eaf..10301d3 100644
@@ -90,7 +90,7 @@
 */
 
 #define DRV_NAME       "8139too"
-#define DRV_VERSION    "0.9.27"
+#define DRV_VERSION    "0.9.28"
 
 
 #include <linux/module.h>
@@ -2512,9 +2512,6 @@ static void __set_rx_mode (struct net_device *dev)
 
        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
-               /* Unconditionally log net taps. */
-               printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                       dev->name);
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
@@ -2629,7 +2626,7 @@ static int __init rtl8139_init_module (void)
        printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
 #endif
 
-       return pci_module_init (&rtl8139_pci_driver);
+       return pci_register_driver(&rtl8139_pci_driver);
 }
 
 
index d2935ae..3eb7048 100644
@@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
         *      Slow phase with lock held.
         */
         
-       disable_irq_nosync(dev->irq);
+       disable_irq_nosync_lockdep(dev->irq);
        
        spin_lock(&ei_local->page_lock);
        
@@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
                outb_p(ENISR_ALL, e8390_base + EN0_IMR);
                spin_unlock(&ei_local->page_lock);
-               enable_irq(dev->irq);
+               enable_irq_lockdep(dev->irq);
                ei_local->stat.tx_errors++;
                return 1;
        }
@@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
        outb_p(ENISR_ALL, e8390_base + EN0_IMR);
        
        spin_unlock(&ei_local->page_lock);
-       enable_irq(dev->irq);
+       enable_irq_lockdep(dev->irq);
 
        dev_kfree_skb (skb);
        ei_local->stat.tx_bytes += send_length;
@@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
-       disable_irq(dev->irq);
+       disable_irq_lockdep(dev->irq);
        ei_interrupt(dev->irq, dev, NULL);
-       enable_irq(dev->irq);
+       enable_irq_lockdep(dev->irq);
 }
 #endif
 
index a2bd811..de4f9e1 100644
@@ -1411,6 +1411,22 @@ config FORCEDETH
          <file:Documentation/networking/net-modules.txt>.  The module will be
          called forcedeth.
 
+config FORCEDETH_NAPI
+       bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+       depends on FORCEDETH && EXPERIMENTAL
+       help
+         NAPI is a new driver API designed to reduce CPU and interrupt load
+         when the driver is receiving lots of packets from the card. It is
+         still somewhat experimental and thus not yet enabled by default.
+
+         If your estimated Rx load is 10kpps or more, or if the card will be
+         deployed on potentially unfriendly networks (e.g. in a firewall),
+         then say Y here.
+
+         See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+         information.
+
+         If in doubt, say N.
 
 config CS89x0
        tristate "CS89x0 support"
@@ -2290,6 +2306,15 @@ config MV643XX_ETH_2
          This enables support for Port 2 of the Marvell MV643XX Gigabit
          Ethernet.
 
+config QLA3XXX
+       tristate "QLogic QLA3XXX Network Driver Support"
+       depends on PCI
+       help
+         This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called qla3xxx.
+
 endmenu
 
 #
@@ -2550,6 +2575,7 @@ config PLIP
 
 config PPP
        tristate "PPP (point-to-point protocol) support"
+       select SLHC
        ---help---
          PPP (Point to Point Protocol) is a newer and better SLIP.  It serves
          the same purpose: sending Internet traffic over telephone (and other
@@ -2730,6 +2756,7 @@ config SLIP
 config SLIP_COMPRESSED
        bool "CSLIP compressed headers"
        depends on SLIP
+       select SLHC
        ---help---
          This protocol is faster than SLIP because it uses compression on the
          TCP/IP headers (not on the data itself), but it has to be supported
@@ -2742,6 +2769,12 @@ config SLIP_COMPRESSED
          <http://www.tldp.org/docs.html#howto>, explains how to configure
          CSLIP. This won't enlarge your kernel.
 
+config SLHC
+       tristate
+       help
+         This option enables Van Jacobsen serial line header compression
+         routines.
+
 config SLIP_SMART
        bool "Keepalive and linefill"
        depends on SLIP
index 8427bf9..6ff1764 100644
@@ -2,10 +2,6 @@
 # Makefile for the Linux network (ethercard) device drivers.
 #
 
-ifeq ($(CONFIG_ISDN_PPP),y)
-  obj-$(CONFIG_ISDN) += slhc.o
-endif
-
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
@@ -113,8 +109,9 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
-obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
@@ -123,9 +120,7 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 
 obj-$(CONFIG_SLIP) += slip.o
-ifeq ($(CONFIG_SLIP_COMPRESSED),y)
-  obj-$(CONFIG_SLIP) += slhc.o
-endif
+obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
index 1c01e9b..c0f3574 100644
@@ -725,7 +725,7 @@ static struct pci_driver acenic_pci_driver = {
 
 static int __init acenic_init(void)
 {
-       return pci_module_init(&acenic_pci_driver);
+       return pci_register_driver(&acenic_pci_driver);
 }
 
 static void __exit acenic_exit(void)
index ed322a7..f83df12 100644
@@ -101,9 +101,9 @@ Revision History:
 
 #include "amd8111e.h"
 #define MODULE_NAME    "amd8111e"
-#define MODULE_VERS    "3.0.5"
+#define MODULE_VERS    "3.0.6"
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
-MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
+MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.6");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 module_param_array(speed_duplex, int, NULL, 0);
@@ -1527,7 +1527,6 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
        u32 mc_filter[2] ;
        int i,bit_num;
        if(dev->flags & IFF_PROMISC){
-               printk(KERN_INFO "%s: Setting  promiscuous mode.\n",dev->name);
                writel( VAL2 | PROM, lp->mmio + CMD2);
                return;
        }
@@ -2158,7 +2157,7 @@ static struct pci_driver amd8111e_driver = {
 
 static int __init amd8111e_init(void)
 {
-       return pci_module_init(&amd8111e_driver);
+       return pci_register_driver(&amd8111e_driver);
 }
 
 static void __exit amd8111e_cleanup(void)
index 979a33d..fc256c1 100644
@@ -177,7 +177,7 @@ static struct pci_driver com20020pci_driver = {
 static int __init com20020pci_init(void)
 {
        BUGLVL(D_NORMAL) printk(VERSION);
-       return pci_module_init(&com20020pci_driver);
+       return pci_register_driver(&com20020pci_driver);
 }
 
 static void __exit com20020pci_cleanup(void)
index cc721ad..3aef3c1 100644
@@ -825,8 +825,6 @@ static void set_multicast_list(struct net_device *dev)
     ariadne_init_ring(dev);
 
     if (dev->flags & IFF_PROMISC) {
-       /* Log any net taps. */
-       printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
        lance->RAP = CSR15;             /* Mode Register */
        lance->RDP = PROM;              /* Set promiscuous mode */
     } else {
index 4ca061c..1a85451 100644
@@ -58,7 +58,7 @@
 #include <asm/dma.h>
 
 static char version[] __initdata =
-       "at1700.c:v1.15 4/7/98  Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+       "at1700.c:v1.16 9/11/06  Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
 #define DRV_NAME "at1700"
 
@@ -851,8 +851,6 @@ set_rx_mode(struct net_device *dev)
        int i;
 
        if (dev->flags & IFF_PROMISC) {
-               /* Unconditionally log net taps. */
-               printk("%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
                outb(3, ioaddr + RX_MODE);      /* Enable promiscuous mode */
        } else if (dev->mc_count > MC_FILTERBREAK
index 91783a8..465efe7 100644
@@ -1121,7 +1121,7 @@ static void set_multicast_list( struct net_device *dev )
 
        if (dev->flags & IFF_PROMISC) {
                /* Log any net taps. */
-               DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+               DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
                REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
        } else {
                short multicast_table[4];
index 55f6e3f..85be0e6 100644
@@ -72,7 +72,7 @@ static int au1000_debug = 3;
 #endif
 
 #define DRV_NAME       "au1000_eth"
-#define DRV_VERSION    "1.5"
+#define DRV_VERSION    "1.6"
 #define DRV_AUTHOR     "Pete Popov <ppopov@embeddedalley.com>"
 #define DRV_DESC       "Au1xxx on-chip Ethernet driver"
 
@@ -1292,7 +1292,6 @@ static void set_rx_mode(struct net_device *dev)
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                aup->mac->control |= MAC_PROMISCUOUS;
-               printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
        } else if ((dev->flags & IFF_ALLMULTI)  ||
                           dev->mc_count > MULTICAST_FILTER_LIMIT) {
                aup->mac->control |= MAC_PASS_ALL_MULTI;
index bea0fc0..17eb291 100644
@@ -2354,7 +2354,7 @@ static int __init b44_init(void)
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
-       return pci_module_init(&b44_driver);
+       return pci_register_driver(&b44_driver);
 }
 
 static void __exit b44_cleanup(void)
index 652eb05..654b903 100644
@@ -6016,7 +6016,7 @@ static struct pci_driver bnx2_pci_driver = {
 
 static int __init bnx2_init(void)
 {
-       return pci_module_init(&bnx2_pci_driver);
+       return pci_register_driver(&bnx2_pci_driver);
 }
 
 static void __exit bnx2_cleanup(void)
index a31544c..26040ab 100644
@@ -5245,7 +5245,7 @@ static int __init cas_init(void)
        else
                link_transition_timeout = 0;
 
-       return pci_module_init(&cas_driver);
+       return pci_register_driver(&cas_driver);
 }
 
 static void __exit cas_cleanup(void)
index e678724..b6de184 100644
@@ -1243,7 +1243,7 @@ static struct pci_driver driver = {
 
 static int __init t1_init_module(void)
 {
-       return pci_module_init(&driver);
+       return pci_register_driver(&driver);
 }
 
 static void __exit t1_cleanup_module(void)
index 91cc8cb..7d06ded 100644
@@ -3444,7 +3444,7 @@ static int __init dfx_init(void)
 {
        int rc_pci, rc_eisa;
 
-       rc_pci = pci_module_init(&dfx_driver);
+       rc_pci = pci_register_driver(&dfx_driver);
        if (rc_pci >= 0) dfx_have_pci = 1;
        
        rc_eisa = dfx_eisa_init();
index 402961e..a572c29 100644
@@ -1815,7 +1815,7 @@ static struct pci_driver rio_driver = {
 static int __init
 rio_init (void)
 {
-       return pci_module_init (&rio_driver);
+       return pci_register_driver(&rio_driver);
 }
 
 static void __exit
index ce850f1..47d9708 100644
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
 
 
 #define DRV_NAME               "e100"
-#define DRV_EXT                "-NAPI"
-#define DRV_VERSION            "3.5.10-k2"DRV_EXT
+#define DRV_EXT                        "-NAPI"
+#define DRV_VERSION            "3.5.16-k2"DRV_EXT
 #define DRV_DESCRIPTION                "Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT          "Copyright(c) 1999-2005 Intel Corporation"
+#define DRV_COPYRIGHT          "Copyright(c) 1999-2006 Intel Corporation"
 #define PFX                    DRV_NAME ": "
 
 #define E100_WATCHDOG_PERIOD   (2 * HZ)
@@ -1395,15 +1395,11 @@ static int e100_phy_init(struct nic *nic)
        }
 
        if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
-          (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
-               /* enable/disable MDI/MDI-X auto-switching.
-                  MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
-               if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
-                  (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
-                  !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
-                       mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
-               else
-                       mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
+          (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+               !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+               /* enable/disable MDI/MDI-X auto-switching. */
+               mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+                               nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
        }
 
        return 0;
@@ -1767,11 +1763,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-       if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
+       if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
                return -ENOMEM;
 
        /* Align, init, and map the RFD. */
-       rx->skb->dev = nic->netdev;
        skb_reserve(rx->skb, NET_IP_ALIGN);
        memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
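
The hunk above switches RX buffer allocation from dev_alloc_skb() plus a
manual skb->dev assignment to netdev_alloc_skb(), which associates the skb
with its device at allocation time. A hedged sketch of that allocation idiom;
the buffer length and the pci_dev/net_device pointers are assumed to come from
a driver-private structure, and the names are hypothetical:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/pci.h>

    static struct sk_buff *example_alloc_rx_buf(struct net_device *netdev,
                                                struct pci_dev *pdev,
                                                unsigned int len,
                                                dma_addr_t *dma)
    {
            /* netdev_alloc_skb() sets skb->dev, so the explicit
             * "skb->dev = netdev" assignment removed above is not needed. */
            struct sk_buff *skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);

            if (!skb)
                    return NULL;

            /* Reserve a couple of bytes so the IP header lands aligned. */
            skb_reserve(skb, NET_IP_ALIGN);

            *dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
            return skb;
    }
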
@@ -2147,7 +2142,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 
        e100_start_receiver(nic, NULL);
 
-       if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
+       if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
                err = -ENOMEM;
                goto err_loopback_none;
        }
@@ -2799,6 +2794,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
        /* Detach; put netif into state similar to hotplug unplug. */
        netif_poll_enable(netdev);
        netif_device_detach(netdev);
+       pci_disable_device(pdev);
 
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
@@ -2877,7 +2873,7 @@ static int __init e100_init_module(void)
                printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
                printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
        }
-       return pci_module_init(&e100_driver);
+       return pci_register_driver(&e100_driver);
 }
 
 static void __exit e100_cleanup_module(void)
index d304297..98afa9c 100644
@@ -242,12 +242,10 @@ struct e1000_adapter {
        struct timer_list watchdog_timer;
        struct timer_list phy_info_timer;
        struct vlan_group *vlgrp;
-       uint16_t mng_vlan_id;
+       uint16_t mng_vlan_id;
        uint32_t bd_number;
        uint32_t rx_buffer_len;
-       uint32_t part_num;
        uint32_t wol;
-       uint32_t ksp3_port_a;
        uint32_t smartspeed;
        uint32_t en_mng_pt;
        uint16_t link_speed;
@@ -342,7 +340,9 @@ struct e1000_adapter {
        boolean_t tso_force;
 #endif
        boolean_t smart_power_down;     /* phy smart power down */
+       boolean_t quad_port_a;
        unsigned long flags;
+       uint32_t eeprom_wol;
 };
 
 enum e1000_state_t {
index 88a82ba..3fccffd 100644
@@ -183,6 +183,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                return -EINVAL;
        }
 
+       while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+               msleep(1);
+
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->autoneg = 1;
                if (hw->media_type == e1000_media_type_fiber)
@@ -199,16 +202,20 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                                  ADVERTISED_TP;
                ecmd->advertising = hw->autoneg_advertised;
        } else
-               if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+               if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+                       clear_bit(__E1000_RESETTING, &adapter->flags);
                        return -EINVAL;
+               }
 
        /* reset the link */
 
-       if (netif_running(adapter->netdev))
-               e1000_reinit_locked(adapter);
-       else
+       if (netif_running(adapter->netdev)) {
+               e1000_down(adapter);
+               e1000_up(adapter);
+       } else
                e1000_reset(adapter);
 
+       clear_bit(__E1000_RESETTING, &adapter->flags);
        return 0;
 }
 
@@ -238,9 +245,13 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       int retval = 0;
 
        adapter->fc_autoneg = pause->autoneg;
 
+       while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+               msleep(1);
+
        if (pause->rx_pause && pause->tx_pause)
                hw->fc = e1000_fc_full;
        else if (pause->rx_pause && !pause->tx_pause)
@@ -253,15 +264,17 @@ e1000_set_pauseparam(struct net_device *netdev,
        hw->original_fc = hw->fc;
 
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
-               if (netif_running(adapter->netdev))
-                       e1000_reinit_locked(adapter);
-               else
+               if (netif_running(adapter->netdev)) {
+                       e1000_down(adapter);
+                       e1000_up(adapter);
+               } else
                        e1000_reset(adapter);
        } else
-               return ((hw->media_type == e1000_media_type_fiber) ?
-                       e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+               retval = ((hw->media_type == e1000_media_type_fiber) ?
+                          e1000_setup_link(hw) : e1000_force_mac_fc(hw));
 
-       return 0;
+       clear_bit(__E1000_RESETTING, &adapter->flags);
+       return retval;
 }
 
 static uint32_t
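
The two hunks above introduce the same serialization pattern: the ethtool path
spins on test_and_set_bit(__E1000_RESETTING, &adapter->flags) with msleep() so
that only one caller at a time runs the down/up sequence, then clears the bit
when finished, including on the error path. A stripped-down sketch of that
pattern with hypothetical example_* names:

    #include <linux/bitops.h>
    #include <linux/delay.h>

    #define EXAMPLE_RESETTING 0             /* hypothetical flag bit */

    /* Stand-in for the real reconfiguration work (the down/up calls above). */
    static int example_apply_settings(void)
    {
            return 0;
    }

    static int example_reconfigure(unsigned long *flags)
    {
            int err;

            /* Sleep-wait until this caller owns the reset; concurrent
             * requests are serialized instead of racing the down/up path. */
            while (test_and_set_bit(EXAMPLE_RESETTING, flags))
                    msleep(1);

            err = example_apply_settings();

            /* Always drop the bit, success or failure. */
            clear_bit(EXAMPLE_RESETTING, flags);
            return err;
    }
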
@@ -415,12 +428,12 @@ e1000_get_regs(struct net_device *netdev,
                regs_buff[23] = regs_buff[18]; /* mdix mode */
                e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
        } else {
-               e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+               e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
                regs_buff[13] = (uint32_t)phy_data; /* cable length */
                regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
                regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
                regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
-               e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+               e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
                regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
                regs_buff[18] = regs_buff[13]; /* cable polarity */
                regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
@@ -696,7 +709,6 @@ e1000_set_ringparam(struct net_device *netdev,
        }
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
-
        return 0;
 err_setup_tx:
        e1000_free_all_rx_resources(adapter);
@@ -881,16 +893,17 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 
        *data = 0;
 
+       /* NOTE: we don't test MSI interrupts here, yet */
        /* Hook up test interrupt handler just for this test */
        if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
-                        netdev->name, netdev)) {
-               shared_int = FALSE;
-       else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
-                             netdev->name, netdev)){
+                        netdev->name, netdev))
+               shared_int = FALSE;
+       else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+                             netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
-       DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+       DPRINTK(HW, INFO, "testing %s interrupt\n",
                (shared_int ? "shared" : "unshared"));
 
        /* Disable all the interrupts */
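
Beyond repairing the broken brace structure, the hunk above keeps the usual
probe-then-share idiom for the interrupt self-test: try to take the line
exclusively with IRQF_PROBE_SHARED, and fall back to a shared registration
only if that fails. A small sketch of the idiom with hypothetical names; the
caller would later release the line with free_irq(irq, cookie):

    #include <linux/errno.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_test_intr(int irq, void *data, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int example_hook_test_irq(unsigned int irq, void *cookie, int *shared)
    {
            *shared = 0;
            if (!request_irq(irq, example_test_intr, IRQF_PROBE_SHARED,
                             "example-test", cookie))
                    return 0;               /* got the line to ourselves */

            /* Someone else owns the line: register as a shared handler. */
            *shared = 1;
            if (request_irq(irq, example_test_intr, IRQF_SHARED,
                            "example-test", cookie))
                    return -EBUSY;          /* line unavailable either way */

            return 0;
    }
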
@@ -1256,11 +1269,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
                /* autoneg off */
                e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
-       } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
+       } else if (adapter->hw.phy_type == e1000_phy_gg82563)
                e1000_write_phy_reg(&adapter->hw,
                                    GG82563_PHY_KMRN_MODE_CTRL,
                                    0x1CC);
-       }
 
        ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
 
@@ -1288,9 +1300,9 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
        }
 
        if (adapter->hw.media_type == e1000_media_type_copper &&
-          adapter->hw.phy_type == e1000_phy_m88) {
+          adapter->hw.phy_type == e1000_phy_m88)
                ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
-       else {
+       else {
                /* Set the ILOS bit on the fiber Nic is half
                 * duplex link is detected. */
                stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
@@ -1426,11 +1438,10 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
        case e1000_82546_rev_3:
        default:
                hw->autoneg = TRUE;
-               if (hw->phy_type == e1000_phy_gg82563) {
+               if (hw->phy_type == e1000_phy_gg82563)
                        e1000_write_phy_reg(hw,
                                            GG82563_PHY_KMRN_MODE_CTRL,
                                            0x180);
-               }
                e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
                if (phy_reg & MII_CR_LOOPBACK) {
                        phy_reg &= ~MII_CR_LOOPBACK;
@@ -1590,6 +1601,8 @@ e1000_diag_test_count(struct net_device *netdev)
        return E1000_TEST_LEN;
 }
 
+extern void e1000_power_up_phy(struct e1000_adapter *);
+
 static void
 e1000_diag_test(struct net_device *netdev,
                   struct ethtool_test *eth_test, uint64_t *data)
@@ -1606,6 +1619,8 @@ e1000_diag_test(struct net_device *netdev,
                uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
                uint8_t autoneg = adapter->hw.autoneg;
 
+               DPRINTK(HW, INFO, "offline testing starting\n");
+
                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result */
                if (e1000_link_test(adapter, &data[4]))
@@ -1629,6 +1644,8 @@ e1000_diag_test(struct net_device *netdev,
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                e1000_reset(adapter);
+               /* make sure the phy is powered up */
+               e1000_power_up_phy(adapter);
                if (e1000_loopback_test(adapter, &data[3]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1642,6 +1659,7 @@ e1000_diag_test(struct net_device *netdev,
                if (if_running)
                        dev_open(netdev);
        } else {
+               DPRINTK(HW, INFO, "online testing starting\n");
                /* Online tests */
                if (e1000_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1657,14 +1675,12 @@ e1000_diag_test(struct net_device *netdev,
        msleep_interruptible(4 * 1000);
 }
 
-static void
-e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
 {
-       struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       int retval = 1; /* fail by default */
 
-       switch (adapter->hw.device_id) {
-       case E1000_DEV_ID_82542:
+       switch (hw->device_id) {
        case E1000_DEV_ID_82543GC_FIBER:
        case E1000_DEV_ID_82543GC_COPPER:
        case E1000_DEV_ID_82544EI_FIBER:
@@ -1672,52 +1688,87 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        case E1000_DEV_ID_82545EM_FIBER:
        case E1000_DEV_ID_82545EM_COPPER:
        case E1000_DEV_ID_82546GB_QUAD_COPPER:
+       case E1000_DEV_ID_82546GB_PCIE:
+               /* these don't support WoL at all */
                wol->supported = 0;
-               wol->wolopts   = 0;
-               return;
-
-       case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-               /* device id 10B5 port-A supports wol */
-               if (!adapter->ksp3_port_a) {
-                       wol->supported = 0;
-                       return;
-               }
-               /* KSP3 does not suppport UCAST wake-ups for any interface */
-               wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
-
-               if (adapter->wol & E1000_WUFC_EX)
-                       DPRINTK(DRV, ERR, "Interface does not support "
-                       "directed (unicast) frame wake-up packets\n");
-               wol->wolopts = 0;
-               goto do_defaults;
-
+               break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
        case E1000_DEV_ID_82571EB_FIBER:
-               /* Wake events only supported on port A for dual fiber */
+       case E1000_DEV_ID_82571EB_SERDES:
+       case E1000_DEV_ID_82571EB_COPPER:
+               /* Wake events not supported on port B */
                if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
                        wol->supported = 0;
-                       wol->wolopts   = 0;
-                       return;
+                       break;
                }
-               /* Fall Through */
-
+               /* return success for non excluded adapter ports */
+               retval = 0;
+               break;
+       case E1000_DEV_ID_82571EB_QUAD_COPPER:
+       case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+               /* quad port adapters only support WoL on port A */
+               if (!adapter->quad_port_a) {
+                       wol->supported = 0;
+                       break;
+               }
+               /* return success for non excluded adapter ports */
+               retval = 0;
+               break;
        default:
-               wol->supported = WAKE_UCAST | WAKE_MCAST |
-                                WAKE_BCAST | WAKE_MAGIC;
-               wol->wolopts = 0;
+               /* dual port cards only support WoL on port A from now on
+                * unless it was enabled in the eeprom for port B
+                * so exclude FUNC_1 ports from having WoL enabled */
+               if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
+                   !adapter->eeprom_wol) {
+                       wol->supported = 0;
+                       break;
+               }
 
-do_defaults:
-               if (adapter->wol & E1000_WUFC_EX)
-                       wol->wolopts |= WAKE_UCAST;
-               if (adapter->wol & E1000_WUFC_MC)
-                       wol->wolopts |= WAKE_MCAST;
-               if (adapter->wol & E1000_WUFC_BC)
-                       wol->wolopts |= WAKE_BCAST;
-               if (adapter->wol & E1000_WUFC_MAG)
-                       wol->wolopts |= WAKE_MAGIC;
+               retval = 0;
+       }
+
+       return retval;
+}
+
+static void
+e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       wol->supported = WAKE_UCAST | WAKE_MCAST |
+                        WAKE_BCAST | WAKE_MAGIC;
+       wol->wolopts = 0;
+
+       /* this function will set ->supported = 0 and return 1 if wol is not
+        * supported by this hardware */
+       if (e1000_wol_exclusion(adapter, wol))
                return;
+
+       /* apply any specific unsupported masks here */
+       switch (adapter->hw.device_id) {
+       case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+               /* KSP3 does not suppport UCAST wake-ups */
+               wol->supported &= ~WAKE_UCAST;
+
+               if (adapter->wol & E1000_WUFC_EX)
+                       DPRINTK(DRV, ERR, "Interface does not support "
+                       "directed (unicast) frame wake-up packets\n");
+               break;
+       default:
+               break;
        }
+
+       if (adapter->wol & E1000_WUFC_EX)
+               wol->wolopts |= WAKE_UCAST;
+       if (adapter->wol & E1000_WUFC_MC)
+               wol->wolopts |= WAKE_MCAST;
+       if (adapter->wol & E1000_WUFC_BC)
+               wol->wolopts |= WAKE_BCAST;
+       if (adapter->wol & E1000_WUFC_MAG)
+               wol->wolopts |= WAKE_MAGIC;
+
+       return;
 }
 
 static int
@@ -1726,51 +1777,35 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       switch (adapter->hw.device_id) {
-       case E1000_DEV_ID_82542:
-       case E1000_DEV_ID_82543GC_FIBER:
-       case E1000_DEV_ID_82543GC_COPPER:
-       case E1000_DEV_ID_82544EI_FIBER:
-       case E1000_DEV_ID_82546EB_QUAD_COPPER:
-       case E1000_DEV_ID_82546GB_QUAD_COPPER:
-       case E1000_DEV_ID_82545EM_FIBER:
-       case E1000_DEV_ID_82545EM_COPPER:
+       if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+               return -EOPNOTSUPP;
+
+       if (e1000_wol_exclusion(adapter, wol))
                return wol->wolopts ? -EOPNOTSUPP : 0;
 
+       switch (hw->device_id) {
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-               /* device id 10B5 port-A supports wol */
-               if (!adapter->ksp3_port_a)
-                       return wol->wolopts ? -EOPNOTSUPP : 0;
-
                if (wol->wolopts & WAKE_UCAST) {
                        DPRINTK(DRV, ERR, "Interface does not support "
                        "directed (unicast) frame wake-up packets\n");
                        return -EOPNOTSUPP;
                }
-
-       case E1000_DEV_ID_82546EB_FIBER:
-       case E1000_DEV_ID_82546GB_FIBER:
-       case E1000_DEV_ID_82571EB_FIBER:
-               /* Wake events only supported on port A for dual fiber */
-               if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
-                       return wol->wolopts ? -EOPNOTSUPP : 0;
-               /* Fall Through */
-
+               break;
        default:
-               if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
-                       return -EOPNOTSUPP;
+               break;
+       }
 
-               adapter->wol = 0;
+       /* these settings will always override what we currently have */
+       adapter->wol = 0;
 
-               if (wol->wolopts & WAKE_UCAST)
-                       adapter->wol |= E1000_WUFC_EX;
-               if (wol->wolopts & WAKE_MCAST)
-                       adapter->wol |= E1000_WUFC_MC;
-               if (wol->wolopts & WAKE_BCAST)
-                       adapter->wol |= E1000_WUFC_BC;
-               if (wol->wolopts & WAKE_MAGIC)
-                       adapter->wol |= E1000_WUFC_MAG;
-       }
+       if (wol->wolopts & WAKE_UCAST)
+               adapter->wol |= E1000_WUFC_EX;
+       if (wol->wolopts & WAKE_MCAST)
+               adapter->wol |= E1000_WUFC_MC;
+       if (wol->wolopts & WAKE_BCAST)
+               adapter->wol |= E1000_WUFC_BC;
+       if (wol->wolopts & WAKE_MAGIC)
+               adapter->wol |= E1000_WUFC_MAG;
 
        return 0;
 }
@@ -1895,8 +1930,8 @@ static struct ethtool_ops e1000_ethtool_ops = {
        .get_regs               = e1000_get_regs,
        .get_wol                = e1000_get_wol,
        .set_wol                = e1000_set_wol,
-       .get_msglevel           = e1000_get_msglevel,
-       .set_msglevel           = e1000_set_msglevel,
+       .get_msglevel           = e1000_get_msglevel,
+       .set_msglevel           = e1000_set_msglevel,
        .nway_reset             = e1000_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = e1000_get_eeprom_len,
@@ -1904,17 +1939,17 @@ static struct ethtool_ops e1000_ethtool_ops = {
        .set_eeprom             = e1000_set_eeprom,
        .get_ringparam          = e1000_get_ringparam,
        .set_ringparam          = e1000_set_ringparam,
-       .get_pauseparam         = e1000_get_pauseparam,
-       .set_pauseparam         = e1000_set_pauseparam,
-       .get_rx_csum            = e1000_get_rx_csum,
-       .set_rx_csum            = e1000_set_rx_csum,
-       .get_tx_csum            = e1000_get_tx_csum,
-       .set_tx_csum            = e1000_set_tx_csum,
-       .get_sg                 = ethtool_op_get_sg,
-       .set_sg                 = ethtool_op_set_sg,
+       .get_pauseparam         = e1000_get_pauseparam,
+       .set_pauseparam         = e1000_set_pauseparam,
+       .get_rx_csum            = e1000_get_rx_csum,
+       .set_rx_csum            = e1000_set_rx_csum,
+       .get_tx_csum            = e1000_get_tx_csum,
+       .set_tx_csum            = e1000_set_tx_csum,
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
 #ifdef NETIF_F_TSO
-       .get_tso                = ethtool_op_get_tso,
-       .set_tso                = e1000_set_tso,
+       .get_tso                = ethtool_op_get_tso,
+       .set_tso                = e1000_set_tso,
 #endif
        .self_test_count        = e1000_diag_test_count,
        .self_test              = e1000_diag_test,
@@ -1922,7 +1957,7 @@ static struct ethtool_ops e1000_ethtool_ops = {
        .phys_id                = e1000_phys_id,
        .get_stats_count        = e1000_get_stats_count,
        .get_ethtool_stats      = e1000_get_ethtool_stats,
-       .get_perm_addr          = ethtool_op_get_perm_addr,
+       .get_perm_addr          = ethtool_op_get_perm_addr,
 };
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
index b3b9191..a6f8f4f 100644
@@ -31,6 +31,7 @@
  * Shared functions for accessing and configuring the MAC
  */
 
+
 #include "e1000_hw.h"
 
 static int32_t e1000_set_phy_type(struct e1000_hw *hw);
@@ -166,10 +167,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_set_phy_type");
 
-    if(hw->mac_type == e1000_undefined)
+    if (hw->mac_type == e1000_undefined)
         return -E1000_ERR_PHY_TYPE;
 
-    switch(hw->phy_id) {
+    switch (hw->phy_id) {
     case M88E1000_E_PHY_ID:
     case M88E1000_I_PHY_ID:
     case M88E1011_I_PHY_ID:
@@ -177,10 +178,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
         hw->phy_type = e1000_phy_m88;
         break;
     case IGP01E1000_I_PHY_ID:
-        if(hw->mac_type == e1000_82541 ||
-           hw->mac_type == e1000_82541_rev_2 ||
-           hw->mac_type == e1000_82547 ||
-           hw->mac_type == e1000_82547_rev_2) {
+        if (hw->mac_type == e1000_82541 ||
+            hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547 ||
+            hw->mac_type == e1000_82547_rev_2) {
             hw->phy_type = e1000_phy_igp;
             break;
         }
@@ -207,6 +208,7 @@ e1000_set_phy_type(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
+
 /******************************************************************************
  * IGP phy init script - initializes the GbE PHY
  *
@@ -220,7 +222,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_phy_init_script");
 
-    if(hw->phy_init_script) {
+    if (hw->phy_init_script) {
         msec_delay(20);
 
         /* Save off the current value of register 0x2F5B to be restored at
@@ -236,7 +238,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
 
         msec_delay(5);
 
-        switch(hw->mac_type) {
+        switch (hw->mac_type) {
         case e1000_82541:
         case e1000_82547:
             e1000_write_phy_reg(hw, 0x1F95, 0x0001);
@@ -273,22 +275,22 @@ e1000_phy_init_script(struct e1000_hw *hw)
         /* Now enable the transmitter */
         e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-        if(hw->mac_type == e1000_82547) {
+        if (hw->mac_type == e1000_82547) {
             uint16_t fused, fine, coarse;
 
             /* Move to analog registers page */
             e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
 
-            if(!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
                 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
 
                 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
                 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
 
-                if(coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
                     coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
                     fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
-                } else if(coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
                     fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
 
                 fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
@@ -387,6 +389,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82571EB_COPPER:
     case E1000_DEV_ID_82571EB_FIBER:
     case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER:
             hw->mac_type = e1000_82571;
         break;
     case E1000_DEV_ID_82572EI_COPPER:
@@ -418,7 +421,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
         return -E1000_ERR_MAC_TYPE;
     }
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_ich8lan:
         hw->swfwhw_semaphore_present = TRUE;
         hw->asf_firmware_present = TRUE;
@@ -456,7 +459,7 @@ e1000_set_media_type(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_media_type");
 
-    if(hw->mac_type != e1000_82543) {
+    if (hw->mac_type != e1000_82543) {
         /* tbi_compatibility is only valid on 82543 */
         hw->tbi_compatibility_en = FALSE;
     }
@@ -516,16 +519,16 @@ e1000_reset_hw(struct e1000_hw *hw)
     DEBUGFUNC("e1000_reset_hw");
 
     /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
         e1000_pci_clear_mwi(hw);
     }
 
-    if(hw->bus_type == e1000_bus_type_pci_express) {
+    if (hw->bus_type == e1000_bus_type_pci_express) {
         /* Prevent the PCI-E bus from sticking if there is no TLP connection
          * on the last TLP read/write transaction when MAC is reset.
          */
-        if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
             DEBUGOUT("PCI-E Master disable polling has failed.\n");
         }
     }
@@ -553,14 +556,14 @@ e1000_reset_hw(struct e1000_hw *hw)
     ctrl = E1000_READ_REG(hw, CTRL);
 
     /* Must reset the PHY before resetting the MAC */
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
         msec_delay(5);
     }
 
     /* Must acquire the MDIO ownership before MAC reset.
      * Ownership defaults to firmware after a reset. */
-    if(hw->mac_type == e1000_82573) {
+    if (hw->mac_type == e1000_82573) {
         timeout = 10;
 
         extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
@@ -570,14 +573,14 @@ e1000_reset_hw(struct e1000_hw *hw)
             E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
             extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
 
-            if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
                 break;
             else
                 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 
             msec_delay(2);
             timeout--;
-        } while(timeout);
+        } while (timeout);
     }
 
     /* Workaround for ICH8 bit corruption issue in FIFO memory */
@@ -595,7 +598,7 @@ e1000_reset_hw(struct e1000_hw *hw)
      */
     DEBUGOUT("Issuing a global reset to MAC\n");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
         case e1000_82544:
         case e1000_82540:
         case e1000_82545:
@@ -634,7 +637,7 @@ e1000_reset_hw(struct e1000_hw *hw)
      * device.  Later controllers reload the EEPROM automatically, so just wait
      * for reload to complete.
      */
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
         case e1000_82542_rev2_0:
         case e1000_82542_rev2_1:
         case e1000_82543:
@@ -669,7 +672,7 @@ e1000_reset_hw(struct e1000_hw *hw)
         case e1000_ich8lan:
         case e1000_80003es2lan:
             ret_val = e1000_get_auto_rd_done(hw);
-            if(ret_val)
+            if (ret_val)
                 /* We don't want to continue accessing MAC registers. */
                 return ret_val;
             break;
@@ -680,13 +683,13 @@ e1000_reset_hw(struct e1000_hw *hw)
     }
 
     /* Disable HW ARPs on ASF enabled adapters */
-    if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+    if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
         manc = E1000_READ_REG(hw, MANC);
         manc &= ~(E1000_MANC_ARP_EN);
         E1000_WRITE_REG(hw, MANC, manc);
     }
 
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         e1000_phy_init_script(hw);
 
         /* Configure activity LED after PHY reset */
@@ -704,8 +707,8 @@ e1000_reset_hw(struct e1000_hw *hw)
     icr = E1000_READ_REG(hw, ICR);
 
     /* If MWI was previously enabled, reenable it. */
-    if(hw->mac_type == e1000_82542_rev2_0) {
-        if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
             e1000_pci_set_mwi(hw);
     }
 
@@ -745,9 +748,20 @@ e1000_init_hw(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_init_hw");
 
+    /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+    if (hw->mac_type == e1000_ich8lan) {
+        reg_data = E1000_READ_REG(hw, TARC0);
+        reg_data |= 0x30000000;
+        E1000_WRITE_REG(hw, TARC0, reg_data);
+
+        reg_data = E1000_READ_REG(hw, STATUS);
+        reg_data &= ~0x80000000;
+        E1000_WRITE_REG(hw, STATUS, reg_data);
+    }
+
     /* Initialize Identification LED */
     ret_val = e1000_id_led_init(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Initializing Identification LED\n");
         return ret_val;
     }
@@ -765,7 +779,7 @@ e1000_init_hw(struct e1000_hw *hw)
     }
 
     /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
         e1000_pci_clear_mwi(hw);
         E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
@@ -779,11 +793,11 @@ e1000_init_hw(struct e1000_hw *hw)
     e1000_init_rx_addrs(hw);
 
     /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         E1000_WRITE_REG(hw, RCTL, 0);
         E1000_WRITE_FLUSH(hw);
         msec_delay(1);
-        if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
             e1000_pci_set_mwi(hw);
     }
 
@@ -792,7 +806,7 @@ e1000_init_hw(struct e1000_hw *hw)
     mta_size = E1000_MC_TBL_SIZE;
     if (hw->mac_type == e1000_ich8lan)
         mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
-    for(i = 0; i < mta_size; i++) {
+    for (i = 0; i < mta_size; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
         /* use write flush to prevent Memory Write Block (MWB) from
          * occurring when accessing our register space */
@@ -804,18 +818,18 @@ e1000_init_hw(struct e1000_hw *hw)
      * gives equal priority to transmits and receives.  Valid only on
      * 82542 and 82543 silicon.
      */
-    if(hw->dma_fairness && hw->mac_type <= e1000_82543) {
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
         ctrl = E1000_READ_REG(hw, CTRL);
         E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
     }
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
         break;
     default:
         /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
-        if(hw->bus_type == e1000_bus_type_pcix) {
+        if (hw->bus_type == e1000_bus_type_pcix) {
             e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
             e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
                 &pcix_stat_hi_word);
@@ -823,9 +837,9 @@ e1000_init_hw(struct e1000_hw *hw)
                 PCIX_COMMAND_MMRBC_SHIFT;
             stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
                 PCIX_STATUS_HI_MMRBC_SHIFT;
-            if(stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+            if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
                 stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
-            if(cmd_mmrbc > stat_mmrbc) {
+            if (cmd_mmrbc > stat_mmrbc) {
                 pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
                 pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
                 e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
@@ -843,7 +857,7 @@ e1000_init_hw(struct e1000_hw *hw)
     ret_val = e1000_setup_link(hw);
 
     /* Set the transmit descriptor write-back policy */
-    if(hw->mac_type > e1000_82544) {
+    if (hw->mac_type > e1000_82544) {
         ctrl = E1000_READ_REG(hw, TXDCTL);
         ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
         switch (hw->mac_type) {
@@ -894,14 +908,13 @@ e1000_init_hw(struct e1000_hw *hw)
     case e1000_ich8lan:
         ctrl = E1000_READ_REG(hw, TXDCTL1);
         ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
-        if(hw->mac_type >= e1000_82571)
+        if (hw->mac_type >= e1000_82571)
             ctrl |= E1000_TXDCTL_COUNT_DESC;
         E1000_WRITE_REG(hw, TXDCTL1, ctrl);
         break;
     }
 
 
-
     if (hw->mac_type == e1000_82573) {
         uint32_t gcr = E1000_READ_REG(hw, GCR);
         gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
@@ -945,10 +958,10 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_adjust_serdes_amplitude");
 
-    if(hw->media_type != e1000_media_type_internal_serdes)
+    if (hw->media_type != e1000_media_type_internal_serdes)
         return E1000_SUCCESS;
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
         break;
@@ -961,11 +974,11 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
         return ret_val;
     }
 
-    if(eeprom_data != EEPROM_RESERVED_WORD) {
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
         /* Adjust SERDES output amplitude only. */
         eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -1033,10 +1046,10 @@ e1000_setup_link(struct e1000_hw *hw)
      * in case we get disconnected and then reconnected into a different
      * hub or switch with different Flow Control capabilities.
      */
-    if(hw->mac_type == e1000_82542_rev2_0)
+    if (hw->mac_type == e1000_82542_rev2_0)
         hw->fc &= (~e1000_fc_tx_pause);
 
-    if((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
         hw->fc &= (~e1000_fc_rx_pause);
 
     hw->original_fc = hw->fc;
@@ -1051,12 +1064,12 @@ e1000_setup_link(struct e1000_hw *hw)
      * or e1000_phy_setup() is called.
      */
     if (hw->mac_type == e1000_82543) {
-               ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
-                                                                       1, &eeprom_data);
-               if (ret_val) {
-                       DEBUGOUT("EEPROM Read Error\n");
-                       return -E1000_ERR_EEPROM;
-               }
+        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                    1, &eeprom_data);
+        if (ret_val) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
         ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
                     SWDPIO__EXT_SHIFT);
         E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
@@ -1089,14 +1102,14 @@ e1000_setup_link(struct e1000_hw *hw)
      * ability to transmit pause frames is not enabled, then these
      * registers will be set to 0.
      */
-    if(!(hw->fc & e1000_fc_tx_pause)) {
+    if (!(hw->fc & e1000_fc_tx_pause)) {
         E1000_WRITE_REG(hw, FCRTL, 0);
         E1000_WRITE_REG(hw, FCRTH, 0);
     } else {
         /* We need to set up the Receive Threshold high and low water marks
          * as well as (optionally) enabling the transmission of XON frames.
          */
-        if(hw->fc_send_xon) {
+        if (hw->fc_send_xon) {
             E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
             E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
         } else {
@@ -1143,11 +1156,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
      * the EEPROM.
      */
     ctrl = E1000_READ_REG(hw, CTRL);
-    if(hw->media_type == e1000_media_type_fiber)
+    if (hw->media_type == e1000_media_type_fiber)
         signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
 
     ret_val = e1000_adjust_serdes_amplitude(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* Take the link out of reset */
@@ -1155,7 +1168,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 
     /* Adjust VCO speed to improve BER performance */
     ret_val = e1000_set_vco_speed(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     e1000_config_collision_dist(hw);
@@ -1226,15 +1239,15 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
      * less than 500 milliseconds even if the other end is doing it in SW).
      * For internal serdes, we just assume a signal is present, then poll.
      */
-    if(hw->media_type == e1000_media_type_internal_serdes ||
+    if (hw->media_type == e1000_media_type_internal_serdes ||
        (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
         DEBUGOUT("Looking for Link\n");
-        for(i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
             msec_delay(10);
             status = E1000_READ_REG(hw, STATUS);
-            if(status & E1000_STATUS_LU) break;
+            if (status & E1000_STATUS_LU) break;
         }
-        if(i == (LINK_UP_TIMEOUT / 10)) {
+        if (i == (LINK_UP_TIMEOUT / 10)) {
             DEBUGOUT("Never got a valid link from auto-neg!!!\n");
             hw->autoneg_failed = 1;
             /* AutoNeg failed to achieve a link, so we'll call
@@ -1243,7 +1256,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
              * non-autonegotiating link partners.
              */
             ret_val = e1000_check_for_link(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error while checking for link\n");
                 return ret_val;
             }
@@ -1277,7 +1290,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
      * the PHY speed and duplex configuration is. In addition, we need to
      * perform a hardware reset on the PHY to take it out of reset.
      */
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         ctrl |= E1000_CTRL_SLU;
         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
         E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -1285,13 +1298,13 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
         ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
         E1000_WRITE_REG(hw, CTRL, ctrl);
         ret_val = e1000_phy_hw_reset(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
     /* Make sure we have a valid PHY */
     ret_val = e1000_detect_gig_phy(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error, did not detect valid phy.\n");
         return ret_val;
     }
@@ -1299,19 +1312,19 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
 
     /* Set PHY to class A mode (if necessary) */
     ret_val = e1000_set_phy_mode(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if((hw->mac_type == e1000_82545_rev_3) ||
+    if ((hw->mac_type == e1000_82545_rev_3) ||
        (hw->mac_type == e1000_82546_rev_3)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
         phy_data |= 0x00000008;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
     }
 
-    if(hw->mac_type <= e1000_82543 ||
-       hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
-       hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+    if (hw->mac_type <= e1000_82543 ||
+        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
         hw->phy_reset_disable = FALSE;
 
     return E1000_SUCCESS;
@@ -1341,7 +1354,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
         return ret_val;
     }
 
-    /* Wait 10ms for MAC to configure PHY from eeprom settings */
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
     msec_delay(15);
     if (hw->mac_type != e1000_ich8lan) {
     /* Configure activity LED after PHY reset */
@@ -1351,11 +1364,14 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
     E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
     }
 
-    /* disable lplu d3 during driver init */
-    ret_val = e1000_set_d3_lplu_state(hw, FALSE);
-    if (ret_val) {
-        DEBUGOUT("Error Disabling LPLU D3\n");
-        return ret_val;
+    /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+    if (hw->phy_type == e1000_phy_igp) {
+        /* disable lplu d3 during driver init */
+        ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+        if (ret_val) {
+            DEBUGOUT("Error Disabling LPLU D3\n");
+            return ret_val;
+        }
     }
 
     /* disable lplu d0 during driver init */
@@ -1393,45 +1409,45 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
         }
     }
     ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* set auto-master slave resolution settings */
-    if(hw->autoneg) {
+    if (hw->autoneg) {
         e1000_ms_type phy_ms_setting = hw->master_slave;
 
-        if(hw->ffe_config_state == e1000_ffe_config_active)
+        if (hw->ffe_config_state == e1000_ffe_config_active)
             hw->ffe_config_state = e1000_ffe_config_enabled;
 
-        if(hw->dsp_config_state == e1000_dsp_config_activated)
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
             hw->dsp_config_state = e1000_dsp_config_enabled;
 
         /* when autonegotiation advertisement is only 1000Mbps then we
           * should disable SmartSpeed and enable Auto MasterSlave
           * resolution as hardware default. */
-        if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
             /* Disable SmartSpeed */
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-            if(ret_val)
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
                 return ret_val;
             phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw,
-                                                  IGP01E1000_PHY_PORT_CONFIG,
-                                                  phy_data);
-            if(ret_val)
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
                 return ret_val;
             /* Set auto Master/Slave resolution process */
             ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             phy_data &= ~CR_1000T_MS_ENABLE;
             ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
 
         ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* load defaults for future use */
@@ -1455,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
             break;
         }
         ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -1476,12 +1492,12 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_copper_link_ggp_setup");
 
-    if(!hw->phy_reset_disable) {
+    if (!hw->phy_reset_disable) {
 
         /* Enable CRS on TX for half-duplex operation. */
         ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -1490,7 +1506,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 
         ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
                                       phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Options:
@@ -1501,7 +1517,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
          *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
          */
         ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
@@ -1526,11 +1542,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
          *   1 - Enabled
          */
         phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
-        if(hw->disable_polarity_correction == 1)
+        if (hw->disable_polarity_correction == 1)
             phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
         ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
 
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* SW Reset the PHY so all changes take effect */
@@ -1586,9 +1602,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
                 return ret_val;
 
             phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-
             ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
                                           phy_data);
+
             if (ret_val)
                 return ret_val;
         }
@@ -1623,12 +1639,12 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_copper_link_mgp_setup");
 
-    if(hw->phy_reset_disable)
+    if (hw->phy_reset_disable)
         return E1000_SUCCESS;
 
     /* Enable CRS on TX. This must be set for half-duplex operation. */
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
@@ -1665,7 +1681,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
      *   1 - Enabled
      */
     phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-    if(hw->disable_polarity_correction == 1)
+    if (hw->disable_polarity_correction == 1)
         phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
     if (ret_val)
@@ -1705,7 +1721,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 
     /* SW Reset the PHY so all changes take effect */
     ret_val = e1000_phy_reset(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Resetting the PHY\n");
         return ret_val;
     }
@@ -1735,7 +1751,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
     /* If autoneg_advertised is zero, we assume it was not defaulted
      * by the calling code so we set to advertise full capability.
      */
-    if(hw->autoneg_advertised == 0)
+    if (hw->autoneg_advertised == 0)
         hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
     /* IFE phy only supports 10/100 */
@@ -1744,7 +1760,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
 
     DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
     ret_val = e1000_phy_setup_autoneg(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Setting up Auto-Negotiation\n");
         return ret_val;
     }
@@ -1754,20 +1770,20 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
      * the Auto Neg Restart bit in the PHY control register.
      */
     ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
     ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* Does the user want to wait for Auto-Neg to complete here, or
      * check at a later time (for example, callback routine).
      */
-    if(hw->wait_autoneg_complete) {
+    if (hw->wait_autoneg_complete) {
         ret_val = e1000_wait_autoneg(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error while waiting for autoneg to complete\n");
             return ret_val;
         }
@@ -1778,7 +1794,6 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
-
 /******************************************************************************
 * Config the MAC and the PHY after link is up.
 *   1) Set up the MAC to the current PHY speed/duplex
@@ -1797,25 +1812,25 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
     int32_t ret_val;
     DEBUGFUNC("e1000_copper_link_postconfig");
 
-    if(hw->mac_type >= e1000_82544) {
+    if (hw->mac_type >= e1000_82544) {
         e1000_config_collision_dist(hw);
     } else {
         ret_val = e1000_config_mac_to_phy(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring MAC to PHY settings\n");
             return ret_val;
         }
     }
     ret_val = e1000_config_fc_after_link_up(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Configuring Flow Control\n");
         return ret_val;
     }
 
     /* Config DSP to improve Giga link quality */
-    if(hw->phy_type == e1000_phy_igp) {
+    if (hw->phy_type == e1000_phy_igp) {
         ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error Configuring DSP after link up\n");
             return ret_val;
         }
@@ -1861,7 +1876,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
 
     /* Check if it is a valid PHY and set PHY mode if necessary. */
     ret_val = e1000_copper_link_preconfig(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     switch (hw->mac_type) {
@@ -1882,30 +1897,30 @@ e1000_setup_copper_link(struct e1000_hw *hw)
         hw->phy_type == e1000_phy_igp_3 ||
         hw->phy_type == e1000_phy_igp_2) {
         ret_val = e1000_copper_link_igp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else if (hw->phy_type == e1000_phy_m88) {
         ret_val = e1000_copper_link_mgp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else if (hw->phy_type == e1000_phy_gg82563) {
         ret_val = e1000_copper_link_ggp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
-    if(hw->autoneg) {
+    if (hw->autoneg) {
         /* Setup autoneg and flow control advertisement
           * and perform autonegotiation */
         ret_val = e1000_copper_link_autoneg(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else {
         /* PHY will be set to 10H, 10F, 100H, or 100F
           * depending on value from forced_speed_duplex. */
         DEBUGOUT("Forcing speed and duplex\n");
         ret_val = e1000_phy_force_speed_duplex(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error Forcing Speed and Duplex\n");
             return ret_val;
         }
@@ -1914,18 +1929,18 @@ e1000_setup_copper_link(struct e1000_hw *hw)
     /* Check link status. Wait up to 100 microseconds for link to become
      * valid.
      */
-    for(i = 0; i < 10; i++) {
+    for (i = 0; i < 10; i++) {
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(phy_data & MII_SR_LINK_STATUS) {
+        if (phy_data & MII_SR_LINK_STATUS) {
             /* Config the MAC and PHY after link is up */
             ret_val = e1000_copper_link_postconfig(hw);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             DEBUGOUT("Valid link established!!!\n");
@@ -2027,7 +2042,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
 
     /* Read the MII Auto-Neg Advertisement Register (Address 4). */
     ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     if (hw->phy_type != e1000_phy_ife) {
@@ -2055,36 +2070,36 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
 
     /* Do we want to advertise 10 Mb Half Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_10_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
         DEBUGOUT("Advertise 10mb Half duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
     }
 
     /* Do we want to advertise 10 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_10_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
         DEBUGOUT("Advertise 10mb Full duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
     }
 
     /* Do we want to advertise 100 Mb Half Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_100_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
         DEBUGOUT("Advertise 100mb Half duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
     }
 
     /* Do we want to advertise 100 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_100_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
         DEBUGOUT("Advertise 100mb Full duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
     }
 
     /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
-    if(hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
         DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
     }
 
     /* Do we want to advertise 1000 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
         DEBUGOUT("Advertise 1000mb Full duplex\n");
         mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
         if (hw->phy_type == e1000_phy_ife) {
@@ -2146,7 +2161,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     }
 
     ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
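
e1000_phy_setup_autoneg() above translates the driver's autoneg_advertised bitmask into the MII Auto-Neg Advertisement register word one capability at a time. The stand-alone sketch below mirrors that translation using the standard IEEE 802.3 register-4 bit positions; the ADV_* and NWAY_AR_* macros are redeclared locally for illustration rather than taken from the driver headers.

/* Minimal sketch of the advertisement-word construction shown above.
 * The constants are the usual IEEE 802.3 MII register 4 bit positions,
 * redeclared here so the example builds on its own. */
#include <stdint.h>
#include <stdio.h>

#define ADV_10_HALF            0x0001
#define ADV_10_FULL            0x0002
#define ADV_100_HALF           0x0004
#define ADV_100_FULL           0x0008

#define NWAY_AR_10T_HD_CAPS    0x0020   /* bit 5:  10BASE-T half duplex   */
#define NWAY_AR_10T_FD_CAPS    0x0040   /* bit 6:  10BASE-T full duplex   */
#define NWAY_AR_100TX_HD_CAPS  0x0080   /* bit 7:  100BASE-TX half duplex */
#define NWAY_AR_100TX_FD_CAPS  0x0100   /* bit 8:  100BASE-TX full duplex */

static uint16_t build_mii_adv(uint16_t advertised)
{
    uint16_t adv_reg = 0;

    if (advertised & ADV_10_HALF)
        adv_reg |= NWAY_AR_10T_HD_CAPS;
    if (advertised & ADV_10_FULL)
        adv_reg |= NWAY_AR_10T_FD_CAPS;
    if (advertised & ADV_100_HALF)
        adv_reg |= NWAY_AR_100TX_HD_CAPS;
    if (advertised & ADV_100_FULL)
        adv_reg |= NWAY_AR_100TX_FD_CAPS;
    /* 1000 Mb capability lives in the separate 1000T control register
     * and is handled the same way by the driver. */
    return adv_reg;
}

int main(void)
{
    printf("advertisement word for 100-full only: 0x%04x\n",
           (unsigned)build_mii_adv(ADV_100_FULL));
    return 0;
}
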
@@ -2194,7 +2209,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 
     /* Read the MII Control Register. */
     ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* We need to disable autoneg in order to force link and duplex. */
@@ -2202,8 +2217,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
 
     /* Are we forcing Full or Half Duplex? */
-    if(hw->forced_speed_duplex == e1000_100_full ||
-       hw->forced_speed_duplex == e1000_10_full) {
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
         /* We want to force full duplex so we SET the full duplex bits in the
          * Device and MII Control Registers.
          */
@@ -2220,7 +2235,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     }
 
     /* Are we forcing 100Mbps??? */
-    if(hw->forced_speed_duplex == e1000_100_full ||
+    if (hw->forced_speed_duplex == e1000_100_full ||
        hw->forced_speed_duplex == e1000_100_half) {
         /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
         ctrl |= E1000_CTRL_SPD_100;
@@ -2243,7 +2258,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     if ((hw->phy_type == e1000_phy_m88) ||
         (hw->phy_type == e1000_phy_gg82563)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
@@ -2251,7 +2266,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          */
         phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
@@ -2275,20 +2290,20 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          * forced whenever speed or duplex are forced.
          */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
         phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
 
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
     /* Write back the modified PHY MII control register. */
     ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     udelay(1);
@@ -2300,50 +2315,50 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
      * only if the user has set wait_autoneg_complete to 1, which is
      * the default.
      */
-    if(hw->wait_autoneg_complete) {
+    if (hw->wait_autoneg_complete) {
         /* We will wait for autoneg to complete. */
         DEBUGOUT("Waiting for forced speed/duplex link.\n");
         mii_status_reg = 0;
 
         /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-        for(i = PHY_FORCE_TIME; i > 0; i--) {
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
             /* Read the MII Status Register and wait for Auto-Neg Complete bit
              * to be set.
              */
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
-            if(mii_status_reg & MII_SR_LINK_STATUS) break;
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
             msec_delay(100);
         }
-        if((i == 0) &&
+        if ((i == 0) &&
            ((hw->phy_type == e1000_phy_m88) ||
             (hw->phy_type == e1000_phy_gg82563))) {
             /* We didn't get link.  Reset the DSP and wait again for link. */
             ret_val = e1000_phy_reset_dsp(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error Resetting PHY DSP\n");
                 return ret_val;
             }
         }
         /* This loop will early-out if the link condition has been met.  */
-        for(i = PHY_FORCE_TIME; i > 0; i--) {
-            if(mii_status_reg & MII_SR_LINK_STATUS) break;
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
             msec_delay(100);
             /* Read the MII Status Register and wait for Auto-Neg Complete bit
              * to be set.
              */
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
     }
@@ -2354,32 +2369,31 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          * defaults back to a 2.5MHz clock when the PHY is reset.
          */
         ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= M88E1000_EPSCR_TX_CLK_25;
         ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* In addition, because of the s/w reset above, we need to enable CRS on
          * TX.  This must be set for both full and half duplex operation.
          */
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-           (!hw->autoneg) &&
-           (hw->forced_speed_duplex == e1000_10_full ||
-            hw->forced_speed_duplex == e1000_10_half)) {
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
             ret_val = e1000_polarity_reversal_workaround(hw);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
     } else if (hw->phy_type == e1000_phy_gg82563) {
@@ -2470,10 +2484,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
      * registers depending on negotiated values.
      */
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if(phy_data & M88E1000_PSSR_DPLX)
+    if (phy_data & M88E1000_PSSR_DPLX)
         ctrl |= E1000_CTRL_FD;
     else
         ctrl &= ~E1000_CTRL_FD;
@@ -2483,9 +2497,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
     /* Set up speed in the Device Control register depending on
      * negotiated values.
      */
-    if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
         ctrl |= E1000_CTRL_SPD_1000;
-    else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
         ctrl |= E1000_CTRL_SPD_100;
 
     /* Write the configured values back to the Device Control Reg. */
@@ -2553,7 +2567,7 @@ e1000_force_mac_fc(struct e1000_hw *hw)
     }
 
     /* Disable TX Flow Control for 82542 (rev 2.0) */
-    if(hw->mac_type == e1000_82542_rev2_0)
+    if (hw->mac_type == e1000_82542_rev2_0)
         ctrl &= (~E1000_CTRL_TFCE);
 
     E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -2587,11 +2601,12 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
      * so we had to force link.  In this case, we need to force the
      * configuration of the MAC to match the "fc" parameter.
      */
-    if(((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
-       ((hw->media_type == e1000_media_type_internal_serdes) && (hw->autoneg_failed)) ||
-       ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
         ret_val = e1000_force_mac_fc(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error forcing flow control settings\n");
             return ret_val;
         }
@@ -2602,19 +2617,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
      * has completed, and if so, how the PHY and link partner has
      * flow control configured.
      */
-    if((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
         /* Read the MII Status Register and check to see if AutoNeg
          * has completed.  We read this twice because this reg has
          * some "sticky" (latched) bits.
          */
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
             /* The AutoNeg process has completed, so we now need to
              * read both the Auto Negotiation Advertisement Register
              * (Address 4) and the Auto-Negotiation Base Page Ability
@@ -2623,11 +2638,11 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              */
             ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
                                          &mii_nway_adv_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
                                          &mii_nway_lp_ability_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Two bits in the Auto Negotiation Advertisement Register
@@ -2664,15 +2679,15 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   1   |   DC    |   1   |   DC    | e1000_fc_full
              *
              */
-            if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-               (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
                 /* Now we need to check if the user selected RX ONLY
                  * of pause frames.  In this case, we had to advertise
                  * FULL flow control because we could not advertise RX
                  * ONLY. Hence, we must now check to see if we need to
                  * turn OFF  the TRANSMISSION of PAUSE frames.
                  */
-                if(hw->original_fc == e1000_fc_full) {
+                if (hw->original_fc == e1000_fc_full) {
                     hw->fc = e1000_fc_full;
                     DEBUGOUT("Flow Control = FULL.\n");
                 } else {
@@ -2688,10 +2703,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
              *
              */
-            else if(!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_tx_pause;
                 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
             }
@@ -2703,10 +2718,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
              *
              */
-            else if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                    !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_rx_pause;
                 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
             }
@@ -2730,9 +2745,9 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              * be asked to delay transmission of packets than asking
              * our link partner to pause transmission of frames.
              */
-            else if((hw->original_fc == e1000_fc_none ||
-                     hw->original_fc == e1000_fc_tx_pause) ||
-                    hw->fc_strict_ieee) {
+            else if ((hw->original_fc == e1000_fc_none ||
+                      hw->original_fc == e1000_fc_tx_pause) ||
+                      hw->fc_strict_ieee) {
                 hw->fc = e1000_fc_none;
                 DEBUGOUT("Flow Control = NONE.\n");
             } else {
@@ -2745,19 +2760,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              * enabled per IEEE 802.3 spec.
              */
             ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error getting link speed and duplex\n");
                 return ret_val;
             }
 
-            if(duplex == HALF_DUPLEX)
+            if (duplex == HALF_DUPLEX)
                 hw->fc = e1000_fc_none;
 
             /* Now we call a subroutine to actually force the MAC
              * controller to use the correct flow control settings.
              */
             ret_val = e1000_force_mac_fc(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error forcing flow control settings\n");
                 return ret_val;
             }
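
The truth table in the comments of e1000_config_fc_after_link_up() reduces to a few bit tests on the local advertisement and link-partner ability words. The sketch below reproduces that resolution outside the driver; the constants and the fc_mode enum are local stand-ins for the NWAY_AR_PAUSE/ASM_DIR, NWAY_LPAR_PAUSE/ASM_DIR bits and the e1000_fc_* values, and the final fallback is a simplification of the original_fc handling shown above.

/* Minimal sketch of the 802.3 flow-control resolution described above.
 * PAUSE is bit 10 and ASM_DIR bit 11 of both the local advertisement
 * (register 4) and the link partner ability (register 5) words. */
#include <stdint.h>
#include <stdio.h>

#define AR_PAUSE    0x0400
#define AR_ASM_DIR  0x0800
#define LP_PAUSE    0x0400
#define LP_ASM_DIR  0x0800

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* 'wanted' plays the role of hw->original_fc in the driver. */
static enum fc_mode resolve_fc(uint16_t adv, uint16_t lp, enum fc_mode wanted)
{
    if ((adv & AR_PAUSE) && (lp & LP_PAUSE))
        /* Both ends advertise symmetric pause: full, unless the user only
         * wanted to receive pause frames. */
        return (wanted == FC_FULL) ? FC_FULL : FC_RX_PAUSE;

    if (!(adv & AR_PAUSE) && (adv & AR_ASM_DIR) &&
        (lp & LP_PAUSE) && (lp & LP_ASM_DIR))
        return FC_TX_PAUSE;

    if ((adv & AR_PAUSE) && (adv & AR_ASM_DIR) &&
        !(lp & LP_PAUSE) && (lp & LP_ASM_DIR))
        return FC_RX_PAUSE;

    /* Remaining rows: the driver keeps rx-pause when the user originally
     * asked for rx-pause or full (and strict IEEE is off), else none. */
    return (wanted == FC_RX_PAUSE || wanted == FC_FULL) ? FC_RX_PAUSE : FC_NONE;
}

int main(void)
{
    printf("resolved mode: %d\n",
           resolve_fc(AR_PAUSE | AR_ASM_DIR, LP_PAUSE, FC_FULL));
    return 0;
}
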
@@ -2796,13 +2811,13 @@ e1000_check_for_link(struct e1000_hw *hw)
      * set when the optics detect a signal. On older adapters, it will be
      * cleared when there is a signal.  This applies to fiber media only.
      */
-    if((hw->media_type == e1000_media_type_fiber) ||
-       (hw->media_type == e1000_media_type_internal_serdes)) {
+    if ((hw->media_type == e1000_media_type_fiber) ||
+        (hw->media_type == e1000_media_type_internal_serdes)) {
         rxcw = E1000_READ_REG(hw, RXCW);
 
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-            if(status & E1000_STATUS_LU)
+            if (status & E1000_STATUS_LU)
                 hw->get_link_status = FALSE;
         }
     }
@@ -2813,20 +2828,20 @@ e1000_check_for_link(struct e1000_hw *hw)
      * receive a Link Status Change interrupt or we have Rx Sequence
      * Errors.
      */
-    if((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
         /* First we want to see if the MII Status Register reports
          * link.  If so, then we want to get the current speed/duplex
          * of the PHY.
          * Read the register twice since the link bit is sticky.
          */
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(phy_data & MII_SR_LINK_STATUS) {
+        if (phy_data & MII_SR_LINK_STATUS) {
             hw->get_link_status = FALSE;
             /* Check if there was DownShift, must be checked immediately after
              * link-up */
@@ -2840,10 +2855,10 @@ e1000_check_for_link(struct e1000_hw *hw)
              * happen due to the execution of this workaround.
              */
 
-            if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-               (!hw->autoneg) &&
-               (hw->forced_speed_duplex == e1000_10_full ||
-                hw->forced_speed_duplex == e1000_10_half)) {
+            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+                (!hw->autoneg) &&
+                (hw->forced_speed_duplex == e1000_10_full ||
+                 hw->forced_speed_duplex == e1000_10_half)) {
                 E1000_WRITE_REG(hw, IMC, 0xffffffff);
                 ret_val = e1000_polarity_reversal_workaround(hw);
                 icr = E1000_READ_REG(hw, ICR);
@@ -2860,7 +2875,7 @@ e1000_check_for_link(struct e1000_hw *hw)
         /* If we are forcing speed/duplex, then we simply return since
          * we have already determined whether we have link or not.
          */
-        if(!hw->autoneg) return -E1000_ERR_CONFIG;
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
 
         /* optimize the dsp settings for the igp phy */
         e1000_config_dsp_after_link_change(hw, TRUE);
@@ -2873,11 +2888,11 @@ e1000_check_for_link(struct e1000_hw *hw)
          * speed/duplex on the MAC to the current PHY speed/duplex
          * settings.
          */
-        if(hw->mac_type >= e1000_82544)
+        if (hw->mac_type >= e1000_82544)
             e1000_config_collision_dist(hw);
         else {
             ret_val = e1000_config_mac_to_phy(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error configuring MAC to PHY settings\n");
                 return ret_val;
             }
@@ -2888,7 +2903,7 @@ e1000_check_for_link(struct e1000_hw *hw)
          * have had to re-autoneg with a different link partner.
          */
         ret_val = e1000_config_fc_after_link_up(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring flow control\n");
             return ret_val;
         }
@@ -2900,7 +2915,7 @@ e1000_check_for_link(struct e1000_hw *hw)
          * at gigabit speed, then TBI compatibility is not needed.  If we are
          * at gigabit speed, we turn on TBI compatibility.
          */
-        if(hw->tbi_compatibility_en) {
+        if (hw->tbi_compatibility_en) {
             uint16_t speed, duplex;
             ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
             if (ret_val) {
@@ -2911,7 +2926,7 @@ e1000_check_for_link(struct e1000_hw *hw)
                 /* If link speed is not set to gigabit speed, we do not need
                  * to enable TBI compatibility.
                  */
-                if(hw->tbi_compatibility_on) {
+                if (hw->tbi_compatibility_on) {
                     /* If we previously were in the mode, turn it off. */
                     rctl = E1000_READ_REG(hw, RCTL);
                     rctl &= ~E1000_RCTL_SBP;
@@ -2924,7 +2939,7 @@ e1000_check_for_link(struct e1000_hw *hw)
                  * packets. Some frames have an additional byte on the end and
                  * will look like CRC errors to the hardware.
                  */
-                if(!hw->tbi_compatibility_on) {
+                if (!hw->tbi_compatibility_on) {
                     hw->tbi_compatibility_on = TRUE;
                     rctl = E1000_READ_REG(hw, RCTL);
                     rctl |= E1000_RCTL_SBP;
@@ -2940,12 +2955,12 @@ e1000_check_for_link(struct e1000_hw *hw)
      * auto-negotiation time to complete, in case the cable was just plugged
      * in. The autoneg_failed flag does this.
      */
-    else if((((hw->media_type == e1000_media_type_fiber) &&
+    else if ((((hw->media_type == e1000_media_type_fiber) &&
               ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
-             (hw->media_type == e1000_media_type_internal_serdes)) &&
-            (!(status & E1000_STATUS_LU)) &&
-            (!(rxcw & E1000_RXCW_C))) {
-        if(hw->autoneg_failed == 0) {
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (!(status & E1000_STATUS_LU)) &&
+              (!(rxcw & E1000_RXCW_C))) {
+        if (hw->autoneg_failed == 0) {
             hw->autoneg_failed = 1;
             return 0;
         }
@@ -2961,7 +2976,7 @@ e1000_check_for_link(struct e1000_hw *hw)
 
         /* Configure Flow Control after forcing link up. */
         ret_val = e1000_config_fc_after_link_up(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring flow control\n");
             return ret_val;
         }
@@ -2971,9 +2986,9 @@ e1000_check_for_link(struct e1000_hw *hw)
      * Device Control register in an attempt to auto-negotiate with our link
      * partner.
      */
-    else if(((hw->media_type == e1000_media_type_fiber) ||
-             (hw->media_type == e1000_media_type_internal_serdes)) &&
-            (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+    else if (((hw->media_type == e1000_media_type_fiber) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
         DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
         E1000_WRITE_REG(hw, TXCW, hw->txcw);
         E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2983,12 +2998,12 @@ e1000_check_for_link(struct e1000_hw *hw)
     /* If we force link for non-auto-negotiation switch, check link status
      * based on MAC synchronization for internal serdes media type.
      */
-    else if((hw->media_type == e1000_media_type_internal_serdes) &&
-            !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+             !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
         /* SYNCH bit and IV bit are sticky. */
         udelay(10);
-        if(E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
-            if(!(rxcw & E1000_RXCW_IV)) {
+        if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
                 hw->serdes_link_down = FALSE;
                 DEBUGOUT("SERDES: Link is up.\n");
             }
@@ -2997,8 +3012,8 @@ e1000_check_for_link(struct e1000_hw *hw)
             DEBUGOUT("SERDES: Link is down.\n");
         }
     }
-    if((hw->media_type == e1000_media_type_internal_serdes) &&
-       (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
+        (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
         hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
     }
     return E1000_SUCCESS;
@@ -3022,12 +3037,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_get_speed_and_duplex");
 
-    if(hw->mac_type >= e1000_82543) {
+    if (hw->mac_type >= e1000_82543) {
         status = E1000_READ_REG(hw, STATUS);
-        if(status & E1000_STATUS_SPEED_1000) {
+        if (status & E1000_STATUS_SPEED_1000) {
             *speed = SPEED_1000;
             DEBUGOUT("1000 Mbs, ");
-        } else if(status & E1000_STATUS_SPEED_100) {
+        } else if (status & E1000_STATUS_SPEED_100) {
             *speed = SPEED_100;
             DEBUGOUT("100 Mbs, ");
         } else {
@@ -3035,7 +3050,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
             DEBUGOUT("10 Mbs, ");
         }
 
-        if(status & E1000_STATUS_FD) {
+        if (status & E1000_STATUS_FD) {
             *duplex = FULL_DUPLEX;
             DEBUGOUT("Full Duplex\n");
         } else {
@@ -3052,18 +3067,18 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
      * if it is operating at half duplex.  Here we set the duplex settings to
      * match the duplex in the link partner's capabilities.
      */
-    if(hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
         ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
             *duplex = HALF_DUPLEX;
         else {
             ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
-            if((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
                (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
                 *duplex = HALF_DUPLEX;
         }
@@ -3104,17 +3119,17 @@ e1000_wait_autoneg(struct e1000_hw *hw)
     DEBUGOUT("Waiting for Auto-Neg to complete.\n");
 
     /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-    for(i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Auto-Neg
          * Complete bit to be set.
          */
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
-        if(phy_data & MII_SR_AUTONEG_COMPLETE) {
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
             return E1000_SUCCESS;
         }
         msec_delay(100);
@@ -3187,14 +3202,16 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
     /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
     ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
 
-    while(mask) {
+    while (mask) {
         /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
          * then raising and lowering the Management Data Clock. A "0" is
          * shifted out to the PHY by setting the MDIO bit to "0" and then
          * raising and lowering the clock.
          */
-        if(data & mask) ctrl |= E1000_CTRL_MDIO;
-        else ctrl &= ~E1000_CTRL_MDIO;
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
 
         E1000_WRITE_REG(hw, CTRL, ctrl);
         E1000_WRITE_FLUSH(hw);
@@ -3245,12 +3262,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
     e1000_raise_mdi_clk(hw, &ctrl);
     e1000_lower_mdi_clk(hw, &ctrl);
 
-    for(data = 0, i = 0; i < 16; i++) {
+    for (data = 0, i = 0; i < 16; i++) {
         data = data << 1;
         e1000_raise_mdi_clk(hw, &ctrl);
         ctrl = E1000_READ_REG(hw, CTRL);
         /* Check to see if we shifted in a "1". */
-        if(ctrl & E1000_CTRL_MDIO) data |= 1;
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
         e1000_lower_mdi_clk(hw, &ctrl);
     }
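
e1000_shift_out_mdi_bits() and e1000_shift_in_mdi_bits() bit-bang the MDIO interface through the CTRL register, clocking each bit with e1000_raise_mdi_clk()/e1000_lower_mdi_clk(). The sketch below shows the same MSB-first shifting pattern with the register accesses abstracted behind invented pin callbacks; it is an illustration of the technique, not driver code.

/* Minimal sketch of MSB-first MDIO bit-banging in the style of the two
 * helpers above.  The callbacks stand in for the CTRL register accesses
 * the driver performs; they are not real e1000 interfaces. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mdio_pins {
    void (*set_mdio)(bool level);   /* drive the data pin   */
    void (*raise_mdc)(void);        /* rising clock edge    */
    void (*lower_mdc)(void);        /* falling clock edge   */
    bool (*get_mdio)(void);         /* sample the data pin  */
};

static void shift_out(const struct mdio_pins *p, uint32_t data, uint16_t count)
{
    uint32_t mask = 1u << (count - 1);      /* walk the bits MSB first */

    while (mask) {
        p->set_mdio(data & mask);           /* present the bit ...     */
        p->raise_mdc();                     /* ... and clock it out    */
        p->lower_mdc();
        mask >>= 1;
    }
}

static uint16_t shift_in(const struct mdio_pins *p)
{
    uint16_t data = 0;
    int i;

    for (i = 0; i < 16; i++) {              /* 16 data bits per read frame */
        data <<= 1;
        p->raise_mdc();
        if (p->get_mdio())                  /* sample while MDC is high */
            data |= 1;
        p->lower_mdc();
    }
    return data;
}

/* Dummy pins so the sketch links and runs on its own. */
static void pin_set(bool level) { (void)level; }
static void pin_clk(void) { }
static bool pin_get(void) { return false; }

int main(void)
{
    struct mdio_pins pins = { pin_set, pin_clk, pin_clk, pin_get };

    shift_out(&pins, 0x1234, 16);
    printf("shifted in: 0x%04x\n", (unsigned)shift_in(&pins));
    return 0;
}
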
 
@@ -3276,7 +3294,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
     if (!hw->swfw_sync_present)
         return e1000_get_hw_eeprom_semaphore(hw);
 
-    while(timeout) {
+    while (timeout) {
             if (e1000_get_hw_eeprom_semaphore(hw))
                 return -E1000_ERR_SWFW_SYNC;
 
@@ -3365,7 +3383,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
-        if(ret_val) {
+        if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
         }
@@ -3410,12 +3428,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_read_phy_reg_ex");
 
-    if(reg_addr > MAX_PHY_REG_ADDRESS) {
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
         DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
         return -E1000_ERR_PARAM;
     }
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         /* Set up Op-code, Phy Address, and register address in the MDI
          * Control register.  The MAC will take care of interfacing with the
          * PHY to retrieve the desired data.
@@ -3427,16 +3445,16 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, MDIC, mdic);
 
         /* Poll the ready bit to see if the MDI read completed */
-        for(i = 0; i < 64; i++) {
+        for (i = 0; i < 64; i++) {
             udelay(50);
             mdic = E1000_READ_REG(hw, MDIC);
-            if(mdic & E1000_MDIC_READY) break;
+            if (mdic & E1000_MDIC_READY) break;
         }
-        if(!(mdic & E1000_MDIC_READY)) {
+        if (!(mdic & E1000_MDIC_READY)) {
             DEBUGOUT("MDI Read did not complete\n");
             return -E1000_ERR_PHY;
         }
-        if(mdic & E1000_MDIC_ERROR) {
+        if (mdic & E1000_MDIC_ERROR) {
             DEBUGOUT("MDI Error\n");
             return -E1000_ERR_PHY;
         }
@@ -3505,7 +3523,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
-        if(ret_val) {
+        if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
         }
@@ -3550,12 +3568,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_write_phy_reg_ex");
 
-    if(reg_addr > MAX_PHY_REG_ADDRESS) {
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
         DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
         return -E1000_ERR_PARAM;
     }
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         /* Set up Op-code, Phy Address, register address, and data intended
          * for the PHY register in the MDI Control register.  The MAC will take
          * care of interfacing with the PHY to send the desired data.
@@ -3568,12 +3586,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, MDIC, mdic);
 
         /* Poll the ready bit to see if the MDI write completed */
-        for(i = 0; i < 640; i++) {
+        for (i = 0; i < 641; i++) {
             udelay(5);
             mdic = E1000_READ_REG(hw, MDIC);
-            if(mdic & E1000_MDIC_READY) break;
+            if (mdic & E1000_MDIC_READY) break;
         }
-        if(!(mdic & E1000_MDIC_READY)) {
+        if (!(mdic & E1000_MDIC_READY)) {
             DEBUGOUT("MDI Write did not complete\n");
             return -E1000_ERR_PHY;
         }
@@ -3685,7 +3703,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 
     DEBUGOUT("Resetting Phy...\n");
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         if ((hw->mac_type == e1000_80003es2lan) &&
             (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
             swfw = E1000_SWFW_PHY1_SM;
@@ -3733,7 +3751,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
     }
     udelay(150);
 
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         /* Configure activity LED after PHY reset */
         led_ctrl = E1000_READ_REG(hw, LEDCTL);
         led_ctrl &= IGP_ACTIVITY_LED_MASK;
@@ -3743,14 +3761,13 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 
     /* Wait for FW to finish PHY configuration. */
     ret_val = e1000_get_phy_cfg_done(hw);
+    if (ret_val != E1000_SUCCESS)
+        return ret_val;
     e1000_release_software_semaphore(hw);
 
-        if ((hw->mac_type == e1000_ich8lan) &&
-            (hw->phy_type == e1000_phy_igp_3)) {
-            ret_val = e1000_init_lcd_from_nvm(hw);
-            if (ret_val)
-                return ret_val;
-        }
+    if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+        ret_val = e1000_init_lcd_from_nvm(hw);
+
     return ret_val;
 }
 
@@ -3781,25 +3798,25 @@ e1000_phy_reset(struct e1000_hw *hw)
     case e1000_82572:
     case e1000_ich8lan:
         ret_val = e1000_phy_hw_reset(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         break;
     default:
         ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= MII_CR_RESET;
         ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         udelay(1);
         break;
     }
 
-    if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
         e1000_phy_init_script(hw);
 
     return E1000_SUCCESS;
@@ -3877,8 +3894,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
     if (hw->kmrn_lock_loss_workaround_disabled)
         return E1000_SUCCESS;
 
-    /* Make sure link is up before proceeding. If not just return.
-     * Attempting this while link is negotiating fouls up link
+    /* Make sure link is up before proceeding.  If not just return.
+     * Attempting this while link is negotiating fouled up link
      * stability */
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
@@ -3955,34 +3972,34 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
     hw->phy_id = (uint32_t) (phy_id_high << 16);
     udelay(20);
     ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
     hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82543:
-        if(hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
         break;
     case e1000_82544:
-        if(hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
         break;
     case e1000_82540:
     case e1000_82545:
     case e1000_82545_rev_3:
     case e1000_82546:
     case e1000_82546_rev_3:
-        if(hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
         break;
     case e1000_82541:
     case e1000_82541_rev_2:
     case e1000_82547:
     case e1000_82547_rev_2:
-        if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
         break;
     case e1000_82573:
-        if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
         break;
     case e1000_80003es2lan:
         if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
@@ -4021,14 +4038,14 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
     do {
         if (hw->phy_type != e1000_phy_gg82563) {
             ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
-            if(ret_val) break;
+            if (ret_val) break;
         }
         ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
-        if(ret_val) break;
+        if (ret_val) break;
         ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
-        if(ret_val) break;
+        if (ret_val) break;
         ret_val = E1000_SUCCESS;
-    } while(0);
+    } while (0);
 
     return ret_val;
 }
@@ -4060,23 +4077,23 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 
     /* Check polarity status */
     ret_val = e1000_check_polarity(hw, &polarity);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->cable_polarity = polarity;
 
     ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
                           IGP01E1000_PSSR_MDIX_SHIFT;
 
-    if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
        IGP01E1000_PSSR_SPEED_1000MBPS) {
         /* Local/Remote Receiver Information are only valid at 1000 Mbps */
         ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4086,19 +4103,19 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 
         /* Get cable length */
         ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Translate to old method */
         average = (max_length + min_length) / 2;
 
-        if(average <= e1000_igp_cable_length_50)
+        if (average <= e1000_igp_cable_length_50)
             phy_info->cable_length = e1000_cable_length_50;
-        else if(average <= e1000_igp_cable_length_80)
+        else if (average <= e1000_igp_cable_length_80)
             phy_info->cable_length = e1000_cable_length_50_80;
-        else if(average <= e1000_igp_cable_length_110)
+        else if (average <= e1000_igp_cable_length_110)
             phy_info->cable_length = e1000_cable_length_80_110;
-        else if(average <= e1000_igp_cable_length_140)
+        else if (average <= e1000_igp_cable_length_140)
             phy_info->cable_length = e1000_cable_length_110_140;
         else
             phy_info->cable_length = e1000_cable_length_140;
@@ -4174,7 +4191,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
     phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->extended_10bt_distance =
@@ -4186,12 +4203,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
 
     /* Check polarity status */
     ret_val = e1000_check_polarity(hw, &polarity);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     phy_info->cable_polarity = polarity;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
@@ -4214,7 +4231,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
         }
 
         ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4251,20 +4268,20 @@ e1000_phy_get_info(struct e1000_hw *hw,
     phy_info->local_rx = e1000_1000t_rx_status_undefined;
     phy_info->remote_rx = e1000_1000t_rx_status_undefined;
 
-    if(hw->media_type != e1000_media_type_copper) {
+    if (hw->media_type != e1000_media_type_copper) {
         DEBUGOUT("PHY info is only valid for copper media\n");
         return -E1000_ERR_CONFIG;
     }
 
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
         DEBUGOUT("PHY info is only valid if link is up\n");
         return -E1000_ERR_CONFIG;
     }
@@ -4284,7 +4301,7 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_validate_mdi_settings");
 
-    if(!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
         DEBUGOUT("Invalid MDI setting detected\n");
         hw->mdix = 1;
         return -E1000_ERR_CONFIG;
@@ -4331,7 +4348,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         eeprom->type = e1000_eeprom_microwire;
         eeprom->opcode_bits = 3;
         eeprom->delay_usec = 50;
-        if(eecd & E1000_EECD_SIZE) {
+        if (eecd & E1000_EECD_SIZE) {
             eeprom->word_size = 256;
             eeprom->address_bits = 8;
         } else {
@@ -4399,7 +4416,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         }
         eeprom->use_eerd = TRUE;
         eeprom->use_eewr = TRUE;
-        if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+        if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
             eeprom->type = e1000_eeprom_flash;
             eeprom->word_size = 2048;
 
@@ -4460,17 +4477,17 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
          * 32KB (incremented by powers of 2).
          */
-        if(hw->mac_type <= e1000_82547_rev_2) {
+        if (hw->mac_type <= e1000_82547_rev_2) {
             /* Set to default value for initial eeprom read. */
             eeprom->word_size = 64;
             ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
             /* 256B eeprom size was not supported in earlier hardware, so we
              * bump eeprom_size up one to ensure that "1" (which maps to 256B)
              * is never the result used in the shifting logic below. */
-            if(eeprom_size)
+            if (eeprom_size)
                 eeprom_size++;
         } else {
             eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
@@ -4555,7 +4572,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
          */
         eecd &= ~E1000_EECD_DI;
 
-        if(data & mask)
+        if (data & mask)
             eecd |= E1000_EECD_DI;
 
         E1000_WRITE_REG(hw, EECD, eecd);
@@ -4568,7 +4585,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
 
         mask = mask >> 1;
 
-    } while(mask);
+    } while (mask);
 
     /* We leave the "DI" bit set to "0" when we leave this routine. */
     eecd &= ~E1000_EECD_DI;
@@ -4600,14 +4617,14 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
     eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
     data = 0;
 
-    for(i = 0; i < count; i++) {
+    for (i = 0; i < count; i++) {
         data = data << 1;
         e1000_raise_ee_clk(hw, &eecd);
 
         eecd = E1000_READ_REG(hw, EECD);
 
         eecd &= ~(E1000_EECD_DI);
-        if(eecd & E1000_EECD_DO)
+        if (eecd & E1000_EECD_DO)
             data |= 1;
 
         e1000_lower_ee_clk(hw, &eecd);
@@ -4638,17 +4655,17 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
 
     if (hw->mac_type != e1000_82573) {
         /* Request EEPROM Access */
-        if(hw->mac_type > e1000_82544) {
+        if (hw->mac_type > e1000_82544) {
             eecd |= E1000_EECD_REQ;
             E1000_WRITE_REG(hw, EECD, eecd);
             eecd = E1000_READ_REG(hw, EECD);
-            while((!(eecd & E1000_EECD_GNT)) &&
+            while ((!(eecd & E1000_EECD_GNT)) &&
                   (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
                 i++;
                 udelay(5);
                 eecd = E1000_READ_REG(hw, EECD);
             }
-            if(!(eecd & E1000_EECD_GNT)) {
+            if (!(eecd & E1000_EECD_GNT)) {
                 eecd &= ~E1000_EECD_REQ;
                 E1000_WRITE_REG(hw, EECD, eecd);
                 DEBUGOUT("Could not acquire EEPROM grant\n");
@@ -4691,7 +4708,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
 
     eecd = E1000_READ_REG(hw, EECD);
 
-    if(eeprom->type == e1000_eeprom_microwire) {
+    if (eeprom->type == e1000_eeprom_microwire) {
         eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
         E1000_WRITE_REG(hw, EECD, eecd);
         E1000_WRITE_FLUSH(hw);
@@ -4714,7 +4731,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, EECD, eecd);
         E1000_WRITE_FLUSH(hw);
         udelay(eeprom->delay_usec);
-    } else if(eeprom->type == e1000_eeprom_spi) {
+    } else if (eeprom->type == e1000_eeprom_spi) {
         /* Toggle CS to flush commands */
         eecd |= E1000_EECD_CS;
         E1000_WRITE_REG(hw, EECD, eecd);
@@ -4748,7 +4765,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, EECD, eecd);
 
         udelay(hw->eeprom.delay_usec);
-    } else if(hw->eeprom.type == e1000_eeprom_microwire) {
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
         /* cleanup eeprom */
 
         /* CS on Microwire is active-high */
@@ -4770,7 +4787,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
     }
 
     /* Stop requesting EEPROM access */
-    if(hw->mac_type > e1000_82544) {
+    if (hw->mac_type > e1000_82544) {
         eecd &= ~E1000_EECD_REQ;
         E1000_WRITE_REG(hw, EECD, eecd);
     }
@@ -4808,12 +4825,12 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
         retry_count += 5;
 
         e1000_standby_eeprom(hw);
-    } while(retry_count < EEPROM_MAX_RETRY_SPI);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
 
     /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
      * only 0-5mSec on 5V devices)
      */
-    if(retry_count >= EEPROM_MAX_RETRY_SPI) {
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
         DEBUGOUT("SPI EEPROM Status error\n");
         return -E1000_ERR_EEPROM;
     }
@@ -4844,7 +4861,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
     /* A check for invalid values:  offset too large, too many words, and not
      * enough words.
      */
-    if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
        (words == 0)) {
         DEBUGOUT("\"words\" parameter out of bounds\n");
         return -E1000_ERR_EEPROM;
@@ -4852,7 +4869,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
 
     /* FLASH reads without acquiring the semaphore are safe */
     if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
-    hw->eeprom.use_eerd == FALSE) {
+        hw->eeprom.use_eerd == FALSE) {
         switch (hw->mac_type) {
         case e1000_80003es2lan:
             break;
@@ -4879,7 +4896,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         uint16_t word_in;
         uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
 
-        if(e1000_spi_eeprom_ready(hw)) {
+        if (e1000_spi_eeprom_ready(hw)) {
             e1000_release_eeprom(hw);
             return -E1000_ERR_EEPROM;
         }
@@ -4887,7 +4904,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         e1000_standby_eeprom(hw);
 
         /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if((eeprom->address_bits == 8) && (offset >= 128))
+        if ((eeprom->address_bits == 8) && (offset >= 128))
             read_opcode |= EEPROM_A8_OPCODE_SPI;
 
         /* Send the READ command (opcode + addr)  */
@@ -4903,7 +4920,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
             word_in = e1000_shift_in_ee_bits(hw, 16);
             data[i] = (word_in >> 8) | (word_in << 8);
         }
-    } else if(eeprom->type == e1000_eeprom_microwire) {
+    } else if (eeprom->type == e1000_eeprom_microwire) {
         for (i = 0; i < words; i++) {
             /* Send the READ command (opcode + addr)  */
             e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
@@ -4948,7 +4965,7 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, EERD, eerd);
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
 
-        if(error) {
+        if (error) {
             break;
         }
         data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
@@ -4985,7 +5002,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
                          E1000_EEPROM_RW_REG_START;
 
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-        if(error) {
+        if (error) {
             break;
         }
 
@@ -4993,7 +5010,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
 
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
 
-        if(error) {
+        if (error) {
             break;
         }
     }
@@ -5014,13 +5031,13 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
     uint32_t i, reg = 0;
     int32_t done = E1000_ERR_EEPROM;
 
-    for(i = 0; i < attempts; i++) {
-        if(eerd == E1000_EEPROM_POLL_READ)
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
             reg = E1000_READ_REG(hw, EERD);
         else
             reg = E1000_READ_REG(hw, EEWR);
 
-        if(reg & E1000_EEPROM_RW_REG_DONE) {
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
             done = E1000_SUCCESS;
             break;
         }
@@ -5052,7 +5069,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
         eecd = ((eecd >> 15) & 0x03);
 
         /* If both bits are set, device is Flash type */
-        if(eecd == 0x03) {
+        if (eecd == 0x03) {
             return FALSE;
         }
     }
@@ -5117,7 +5134,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
         checksum += eeprom_data;
     }
 
-    if(checksum == (uint16_t) EEPROM_SUM)
+    if (checksum == (uint16_t) EEPROM_SUM)
         return E1000_SUCCESS;
     else {
         DEBUGOUT("EEPROM Checksum Invalid\n");
@@ -5142,15 +5159,15 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_update_eeprom_checksum");
 
-    for(i = 0; i < EEPROM_CHECKSUM_REG; i++) {
-        if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
         checksum += eeprom_data;
     }
     checksum = (uint16_t) EEPROM_SUM - checksum;
-    if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
         DEBUGOUT("EEPROM Write Error\n");
         return -E1000_ERR_EEPROM;
     } else if (hw->eeprom.type == e1000_eeprom_flash) {
@@ -5192,14 +5209,14 @@ e1000_write_eeprom(struct e1000_hw *hw,
     /* A check for invalid values:  offset too large, too many words, and not
      * enough words.
      */
-    if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
        (words == 0)) {
         DEBUGOUT("\"words\" parameter out of bounds\n");
         return -E1000_ERR_EEPROM;
     }
 
     /* 82573 writes only through eewr */
-    if(eeprom->use_eewr == TRUE)
+    if (eeprom->use_eewr == TRUE)
         return e1000_write_eeprom_eewr(hw, offset, words, data);
 
     if (eeprom->type == e1000_eeprom_ich8)
@@ -5209,7 +5226,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
     if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
         return -E1000_ERR_EEPROM;
 
-    if(eeprom->type == e1000_eeprom_microwire) {
+    if (eeprom->type == e1000_eeprom_microwire) {
         status = e1000_write_eeprom_microwire(hw, offset, words, data);
     } else {
         status = e1000_write_eeprom_spi(hw, offset, words, data);
@@ -5245,7 +5262,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
     while (widx < words) {
         uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
 
-        if(e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
 
         e1000_standby_eeprom(hw);
 
@@ -5256,7 +5273,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
         e1000_standby_eeprom(hw);
 
         /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if((eeprom->address_bits == 8) && (offset >= 128))
+        if ((eeprom->address_bits == 8) && (offset >= 128))
             write_opcode |= EEPROM_A8_OPCODE_SPI;
 
         /* Send the Write command (8-bit opcode + addr) */
@@ -5278,7 +5295,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
              * operation, while the smaller eeproms are capable of an 8-byte
              * PAGE WRITE operation.  Break the inner loop to pass new address
              */
-            if((((offset + widx)*2) % eeprom->page_size) == 0) {
+            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
                 e1000_standby_eeprom(hw);
                 break;
             }
@@ -5344,12 +5361,12 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
          * signal that the command has been completed by raising the DO signal.
          * If DO does not go high in 10 milliseconds, then error out.
          */
-        for(i = 0; i < 200; i++) {
+        for (i = 0; i < 200; i++) {
             eecd = E1000_READ_REG(hw, EECD);
-            if(eecd & E1000_EECD_DO) break;
+            if (eecd & E1000_EECD_DO) break;
             udelay(50);
         }
-        if(i == 200) {
+        if (i == 200) {
             DEBUGOUT("EEPROM Write did not complete\n");
             return -E1000_ERR_EEPROM;
         }
@@ -5539,40 +5556,6 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
     return error;
 }
 
-/******************************************************************************
- * Reads the adapter's part number from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- * part_num - Adapter's part number
- *****************************************************************************/
-int32_t
-e1000_read_part_num(struct e1000_hw *hw,
-                    uint32_t *part_num)
-{
-    uint16_t offset = EEPROM_PBA_BYTE_1;
-    uint16_t eeprom_data;
-
-    DEBUGFUNC("e1000_read_part_num");
-
-    /* Get word 0 from EEPROM */
-    if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
-        DEBUGOUT("EEPROM Read Error\n");
-        return -E1000_ERR_EEPROM;
-    }
-    /* Save word 0 in upper half of part_num */
-    *part_num = (uint32_t) (eeprom_data << 16);
-
-    /* Get word 1 from EEPROM */
-    if(e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
-        DEBUGOUT("EEPROM Read Error\n");
-        return -E1000_ERR_EEPROM;
-    }
-    /* Save word 1 in lower half of part_num */
-    *part_num |= eeprom_data;
-
-    return E1000_SUCCESS;
-}
-
 /******************************************************************************
  * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
  * second function of dual function devices
@@ -5587,9 +5570,9 @@ e1000_read_mac_addr(struct e1000_hw * hw)
 
     DEBUGFUNC("e1000_read_mac_addr");
 
-    for(i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
         offset = i >> 1;
-        if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
@@ -5604,12 +5587,12 @@ e1000_read_mac_addr(struct e1000_hw * hw)
     case e1000_82546_rev_3:
     case e1000_82571:
     case e1000_80003es2lan:
-        if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
             hw->perm_mac_addr[5] ^= 0x01;
         break;
     }
 
-    for(i = 0; i < NODE_ADDRESS_SIZE; i++)
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
         hw->mac_addr[i] = hw->perm_mac_addr[i];
     return E1000_SUCCESS;
 }
@@ -5648,7 +5631,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
 
     /* Zero out the other 15 receive addresses. */
     DEBUGOUT("Clearing RAR[1-15]\n");
-    for(i = 1; i < rar_num; i++) {
+    for (i = 1; i < rar_num; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_FLUSH(hw);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5699,7 +5682,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
         num_rar_entry -= 1;
 
-    for(i = rar_used_count; i < num_rar_entry; i++) {
+    for (i = rar_used_count; i < num_rar_entry; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_FLUSH(hw);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5711,13 +5694,13 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     num_mta_entry = E1000_NUM_MTA_REGISTERS;
     if (hw->mac_type == e1000_ich8lan)
         num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
-    for(i = 0; i < num_mta_entry; i++) {
+    for (i = 0; i < num_mta_entry; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
         E1000_WRITE_FLUSH(hw);
     }
 
     /* Add the new addresses */
-    for(i = 0; i < mc_addr_count; i++) {
+    for (i = 0; i < mc_addr_count; i++) {
         DEBUGOUT(" Adding the multicast addresses:\n");
         DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
                   mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
@@ -5849,7 +5832,7 @@ e1000_mta_set(struct e1000_hw *hw,
      * in the MTA, save off the previous entry before writing and
      * restore the old value after writing.
      */
-    if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
         temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
         E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
         E1000_WRITE_FLUSH(hw);
@@ -5999,7 +5982,7 @@ e1000_id_led_init(struct e1000_hw * hw)
 
     DEBUGFUNC("e1000_id_led_init");
 
-    if(hw->mac_type < e1000_82540) {
+    if (hw->mac_type < e1000_82540) {
         /* Nothing to do */
         return E1000_SUCCESS;
     }
@@ -6009,7 +5992,7 @@ e1000_id_led_init(struct e1000_hw * hw)
     hw->ledctl_mode1 = hw->ledctl_default;
     hw->ledctl_mode2 = hw->ledctl_default;
 
-    if(e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
         DEBUGOUT("EEPROM Read Error\n");
         return -E1000_ERR_EEPROM;
     }
@@ -6026,7 +6009,7 @@ e1000_id_led_init(struct e1000_hw * hw)
     }
     for (i = 0; i < 4; i++) {
         temp = (eeprom_data >> (i << 2)) & led_mask;
-        switch(temp) {
+        switch (temp) {
         case ID_LED_ON1_DEF2:
         case ID_LED_ON1_ON2:
         case ID_LED_ON1_OFF2:
@@ -6043,7 +6026,7 @@ e1000_id_led_init(struct e1000_hw * hw)
             /* Do nothing */
             break;
         }
-        switch(temp) {
+        switch (temp) {
         case ID_LED_DEF1_ON2:
         case ID_LED_ON1_ON2:
         case ID_LED_OFF1_ON2:
@@ -6077,7 +6060,7 @@ e1000_setup_led(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_setup_led");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6091,16 +6074,16 @@ e1000_setup_led(struct e1000_hw *hw)
         /* Turn off PHY Smart Power Down (if enabled) */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                      &hw->phy_spd_default);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                       (uint16_t)(hw->phy_spd_default &
                                       ~IGP01E1000_GMII_SPD));
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         /* Fall Through */
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             ledctl = E1000_READ_REG(hw, LEDCTL);
             /* Save current LEDCTL settings */
             hw->ledctl_default = ledctl;
@@ -6111,7 +6094,7 @@ e1000_setup_led(struct e1000_hw *hw)
             ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
                        E1000_LEDCTL_LED0_MODE_SHIFT);
             E1000_WRITE_REG(hw, LEDCTL, ledctl);
-        } else if(hw->media_type == e1000_media_type_copper)
+        } else if (hw->media_type == e1000_media_type_copper)
             E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
         break;
     }
@@ -6119,6 +6102,7 @@ e1000_setup_led(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
+
 /******************************************************************************
  * Used on 82571 and later Si that has LED blink bits.
  * Callers must use their own timer and should have already called
@@ -6169,7 +6153,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_cleanup_led");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6183,7 +6167,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
         /* Turn on PHY Smart Power Down (if previously enabled) */
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                       hw->phy_spd_default);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         /* Fall Through */
     default:
@@ -6211,7 +6195,7 @@ e1000_led_on(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_led_on");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6220,7 +6204,7 @@ e1000_led_on(struct e1000_hw *hw)
         ctrl |= E1000_CTRL_SWDPIO0;
         break;
     case e1000_82544:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Set SW Defineable Pin 0 to turn on the LED */
             ctrl |= E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6231,7 +6215,7 @@ e1000_led_on(struct e1000_hw *hw)
         }
         break;
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Clear SW Defineable Pin 0 to turn on the LED */
             ctrl &= ~E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6262,7 +6246,7 @@ e1000_led_off(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_led_off");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6271,7 +6255,7 @@ e1000_led_off(struct e1000_hw *hw)
         ctrl |= E1000_CTRL_SWDPIO0;
         break;
     case e1000_82544:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Clear SW Defineable Pin 0 to turn off the LED */
             ctrl &= ~E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6282,7 +6266,7 @@ e1000_led_off(struct e1000_hw *hw)
         }
         break;
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Set SW Defineable Pin 0 to turn off the LED */
             ctrl |= E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6306,7 +6290,7 @@ e1000_led_off(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static void
+void
 e1000_clear_hw_cntrs(struct e1000_hw *hw)
 {
     volatile uint32_t temp;
@@ -6369,7 +6353,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
     temp = E1000_READ_REG(hw, MPTC);
     temp = E1000_READ_REG(hw, BPTC);
 
-    if(hw->mac_type < e1000_82543) return;
+    if (hw->mac_type < e1000_82543) return;
 
     temp = E1000_READ_REG(hw, ALGNERRC);
     temp = E1000_READ_REG(hw, RXERRC);
@@ -6378,13 +6362,13 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
     temp = E1000_READ_REG(hw, TSCTC);
     temp = E1000_READ_REG(hw, TSCTFC);
 
-    if(hw->mac_type <= e1000_82544) return;
+    if (hw->mac_type <= e1000_82544) return;
 
     temp = E1000_READ_REG(hw, MGTPRC);
     temp = E1000_READ_REG(hw, MGTPDC);
     temp = E1000_READ_REG(hw, MGTPTC);
 
-    if(hw->mac_type <= e1000_82547_rev_2) return;
+    if (hw->mac_type <= e1000_82547_rev_2) return;
 
     temp = E1000_READ_REG(hw, IAC);
     temp = E1000_READ_REG(hw, ICRXOC);
@@ -6415,8 +6399,8 @@ e1000_reset_adaptive(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_reset_adaptive");
 
-    if(hw->adaptive_ifs) {
-        if(!hw->ifs_params_forced) {
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
             hw->current_ifs_val = 0;
             hw->ifs_min_val = IFS_MIN;
             hw->ifs_max_val = IFS_MAX;
@@ -6443,12 +6427,12 @@ e1000_update_adaptive(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_update_adaptive");
 
-    if(hw->adaptive_ifs) {
-        if((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
-            if(hw->tx_packet_delta > MIN_NUM_XMITS) {
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
                 hw->in_ifs_mode = TRUE;
-                if(hw->current_ifs_val < hw->ifs_max_val) {
-                    if(hw->current_ifs_val == 0)
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
                         hw->current_ifs_val = hw->ifs_min_val;
                     else
                         hw->current_ifs_val += hw->ifs_step_size;
@@ -6456,7 +6440,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
                 }
             }
         } else {
-            if(hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
                 hw->current_ifs_val = 0;
                 hw->in_ifs_mode = FALSE;
                 E1000_WRITE_REG(hw, AIT, 0);
@@ -6503,46 +6487,46 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
      * This could be simplified if all environments supported
      * 64-bit integers.
      */
-    if(carry_bit && ((stats->gorcl & 0x80000000) == 0))
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
         stats->gorch++;
     /* Is this a broadcast or multicast?  Check broadcast first,
      * since the test for a multicast frame will test positive on
      * a broadcast frame.
      */
-    if((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+    if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
         /* Broadcast packet */
         stats->bprc++;
-    else if(*mac_addr & 0x01)
+    else if (*mac_addr & 0x01)
         /* Multicast packet */
         stats->mprc++;
 
-    if(frame_len == hw->max_frame_size) {
+    if (frame_len == hw->max_frame_size) {
         /* In this case, the hardware has overcounted the number of
          * oversize frames.
          */
-        if(stats->roc > 0)
+        if (stats->roc > 0)
             stats->roc--;
     }
 
     /* Adjust the bin counters when the extra byte put the frame in the
      * wrong bin. Remember that the frame_len was adjusted above.
      */
-    if(frame_len == 64) {
+    if (frame_len == 64) {
         stats->prc64++;
         stats->prc127--;
-    } else if(frame_len == 127) {
+    } else if (frame_len == 127) {
         stats->prc127++;
         stats->prc255--;
-    } else if(frame_len == 255) {
+    } else if (frame_len == 255) {
         stats->prc255++;
         stats->prc511--;
-    } else if(frame_len == 511) {
+    } else if (frame_len == 511) {
         stats->prc511++;
         stats->prc1023--;
-    } else if(frame_len == 1023) {
+    } else if (frame_len == 1023) {
         stats->prc1023++;
         stats->prc1522--;
-    } else if(frame_len == 1522) {
+    } else if (frame_len == 1522) {
         stats->prc1522++;
     }
 }
@@ -6582,10 +6566,10 @@ e1000_get_bus_info(struct e1000_hw *hw)
         hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
                        e1000_bus_type_pcix : e1000_bus_type_pci;
 
-        if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
             hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
                             e1000_bus_speed_66 : e1000_bus_speed_120;
-        } else if(hw->bus_type == e1000_bus_type_pci) {
+        } else if (hw->bus_type == e1000_bus_type_pci) {
             hw->bus_speed = (status & E1000_STATUS_PCI66) ?
                             e1000_bus_speed_66 : e1000_bus_speed_33;
         } else {
@@ -6680,11 +6664,11 @@ e1000_get_cable_length(struct e1000_hw *hw,
     *min_length = *max_length = 0;
 
     /* Use old method for Phy older than IGP */
-    if(hw->phy_type == e1000_phy_m88) {
+    if (hw->phy_type == e1000_phy_m88) {
 
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
@@ -6743,7 +6727,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
             return -E1000_ERR_PHY;
             break;
         }
-    } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
         uint16_t cur_agc_value;
         uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
         uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
@@ -6752,10 +6736,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
                                                           IGP01E1000_PHY_AGC_C,
                                                           IGP01E1000_PHY_AGC_D};
         /* Read the AGC registers for all channels */
-        for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
 
             ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
@@ -6805,7 +6789,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
             if (ret_val)
                 return ret_val;
 
-           /* Getting bits 15:9, which represent the combination of course and
+            /* Getting bits 15:9, which represent the combination of coarse and
              * fine gain values.  The result is a number that can be put into
              * the lookup table to obtain the approximate cable length. */
             cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
@@ -6870,7 +6854,7 @@ e1000_check_polarity(struct e1000_hw *hw,
         /* return the Polarity bit in the Status register. */
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
                     M88E1000_PSSR_REV_POLARITY_SHIFT;
@@ -6880,18 +6864,18 @@ e1000_check_polarity(struct e1000_hw *hw,
         /* Read the Status register to check the speed */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
          * find the polarity status */
-        if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
            IGP01E1000_PSSR_SPEED_1000MBPS) {
 
             /* Read the GIG initialization PCS register (0x00B4) */
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
                                          &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Check the polarity bits */
@@ -6940,7 +6924,7 @@ e1000_check_downshift(struct e1000_hw *hw)
         hw->phy_type == e1000_phy_igp_2) {
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
@@ -6948,7 +6932,7 @@ e1000_check_downshift(struct e1000_hw *hw)
                (hw->phy_type == e1000_phy_gg82563)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
@@ -6988,42 +6972,42 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_config_dsp_after_link_change");
 
-    if(hw->phy_type != e1000_phy_igp)
+    if (hw->phy_type != e1000_phy_igp)
         return E1000_SUCCESS;
 
-    if(link_up) {
+    if (link_up) {
         ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error getting link speed and duplex\n");
             return ret_val;
         }
 
-        if(speed == SPEED_1000) {
+        if (speed == SPEED_1000) {
 
             ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
             if (ret_val)
                 return ret_val;
 
-            if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
                 min_length >= e1000_igp_cable_length_50) {
 
-                for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
                     ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
                                                  &phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
 
                     phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
 
                     ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
                                                   phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
                 }
                 hw->dsp_config_state = e1000_dsp_config_activated;
             }
 
-            if((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
                (min_length < e1000_igp_cable_length_50)) {
 
                 uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
@@ -7032,70 +7016,70 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
                 /* clear previous idle error counts */
                 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
                                              &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
-                for(i = 0; i < ffe_idle_err_timeout; i++) {
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
                     udelay(1000);
                     ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
                                                  &phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
 
                     idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
-                    if(idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
                         hw->ffe_config_state = e1000_ffe_config_active;
 
                         ret_val = e1000_write_phy_reg(hw,
                                     IGP01E1000_PHY_DSP_FFE,
                                     IGP01E1000_PHY_DSP_FFE_CM_CP);
-                        if(ret_val)
+                        if (ret_val)
                             return ret_val;
                         break;
                     }
 
-                    if(idle_errs)
+                    if (idle_errs)
                         ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
                 }
             }
         }
     } else {
-        if(hw->dsp_config_state == e1000_dsp_config_activated) {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
             /* Save off the current value of register 0x2F5B to be restored at
              * the end of the routines. */
             ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Disable the PHY transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_FORCE_GIGA);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
-            for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
                 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
                 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
                 phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
 
                 ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             }
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_RESTART_AUTONEG);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
@@ -7103,40 +7087,40 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
             /* Now enable the transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->dsp_config_state = e1000_dsp_config_enabled;
         }
 
-        if(hw->ffe_config_state == e1000_ffe_config_active) {
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
             /* Save off the current value of register 0x2F5B to be restored at
              * the end of the routines. */
             ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Disable the PHY transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_FORCE_GIGA);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
                                           IGP01E1000_PHY_DSP_FFE_DEFAULT);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_RESTART_AUTONEG);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
@@ -7144,7 +7128,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
             /* Now enable the transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->ffe_config_state = e1000_ffe_config_enabled;
@@ -7169,20 +7153,20 @@ e1000_set_phy_mode(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_phy_mode");
 
-    if((hw->mac_type == e1000_82545_rev_3) &&
-       (hw->media_type == e1000_media_type_copper)) {
+    if ((hw->mac_type == e1000_82545_rev_3) &&
+        (hw->media_type == e1000_media_type_copper)) {
         ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
-        if(ret_val) {
+        if (ret_val) {
             return ret_val;
         }
 
-        if((eeprom_data != EEPROM_RESERVED_WORD) &&
-           (eeprom_data & EEPROM_PHY_CLASS_A)) {
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+            (eeprom_data & EEPROM_PHY_CLASS_A)) {
             ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->phy_reset_disable = FALSE;
@@ -7233,16 +7217,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
         phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
     } else {
         ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
-    if(!active) {
-        if(hw->mac_type == e1000_82541_rev_2 ||
-           hw->mac_type == e1000_82547_rev_2) {
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
             phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else {
             if (hw->mac_type == e1000_ich8lan) {
@@ -7264,13 +7248,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
         if (hw->smart_speed == e1000_smart_speed_on) {
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                          &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                           phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else if (hw->smart_speed == e1000_smart_speed_off) {
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7281,19 +7265,19 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
             phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                           phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
 
-    } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
-              (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
-              (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
 
-        if(hw->mac_type == e1000_82541_rev_2 ||
+        if (hw->mac_type == e1000_82541_rev_2 ||
             hw->mac_type == e1000_82547_rev_2) {
             phy_data |= IGP01E1000_GMII_FLEX_SPD;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else {
             if (hw->mac_type == e1000_ich8lan) {
@@ -7310,12 +7294,12 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
 
         /* When LPLU is enabled we should disable SmartSpeed */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
     }
@@ -7345,14 +7329,14 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
     uint16_t phy_data;
     DEBUGFUNC("e1000_set_d0_lplu_state");
 
-    if(hw->mac_type <= e1000_82547_rev_2)
+    if (hw->mac_type <= e1000_82547_rev_2)
         return E1000_SUCCESS;
 
     if (hw->mac_type == e1000_ich8lan) {
         phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
     } else {
         ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -7374,13 +7358,13 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
         if (hw->smart_speed == e1000_smart_speed_on) {
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                          &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                           phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else if (hw->smart_speed == e1000_smart_speed_off) {
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7391,7 +7375,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
             phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                           phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
 
@@ -7410,12 +7394,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
 
         /* When LPLU is enabled we should disable SmartSpeed */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
     }
@@ -7436,7 +7420,7 @@ e1000_set_vco_speed(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_vco_speed");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
        break;
@@ -7447,39 +7431,39 @@ e1000_set_vco_speed(struct e1000_hw *hw)
     /* Set PHY register 30, page 5, bit 8 to 0 */
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* Set PHY register 30, page 4, bit 11 to 1 */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data |= M88E1000_PHY_VCO_REG_BIT11;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     return E1000_SUCCESS;
@@ -7558,7 +7542,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
 {
     uint8_t *tmp;
     uint8_t *bufptr = buffer;
-    uint32_t data;
+    uint32_t data = 0;
     uint16_t remaining, i, j, prev_bytes;
 
     /* sum = only sum of the data and it is not checksum */
@@ -7638,7 +7622,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
 
     buffer = (uint8_t *) hdr;
     i = length;
-    while(i--)
+    while (i--)
         sum += buffer[i];
 
     hdr->checksum = 0 - sum;
@@ -7661,8 +7645,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
  * returns  - E1000_SUCCESS for success.
  ****************************************************************************/
 static int32_t
-e1000_mng_write_commit(
-    struct e1000_hw * hw)
+e1000_mng_write_commit(struct e1000_hw * hw)
 {
     uint32_t hicr;
 
@@ -7834,31 +7817,31 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
     /* Disable the transmitter on the PHY */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* This loop will early-out if the NO link condition has been met. */
-    for(i = PHY_FORCE_TIME; i > 0; i--) {
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Link Status bit
          * to be clear.
          */
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
         msec_delay_irq(100);
     }
 
@@ -7868,40 +7851,40 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
     /* Now we will re-enable the transmitter on the PHY */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* This loop will early-out if the link condition has been met. */
-    for(i = PHY_FORCE_TIME; i > 0; i--) {
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Link Status bit
          * to be set.
          */
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(mii_status_reg & MII_SR_LINK_STATUS) break;
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
         msec_delay_irq(100);
     }
     return E1000_SUCCESS;
@@ -7980,15 +7963,15 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
 
     e1000_set_pci_express_master_disable(hw);
 
-    while(timeout) {
-        if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+    while (timeout) {
+        if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
             break;
         else
             udelay(100);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         DEBUGOUT("Master requests are pending.\n");
         return -E1000_ERR_MASTER_REQUESTS_PENDING;
     }
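
The change in this hunk is only whitespace, but the routine it touches is a good illustration of the bounded-poll idiom used throughout e1000_hw.c: spin on a status bit, delay between reads, and bail out with an error once a retry budget is exhausted. Below is a minimal user-space sketch of that idiom; read_status(), MASTER_ENABLE and the 800-iteration budget are illustrative stand-ins, not the driver's real register interface.

#include <stdio.h>
#include <unistd.h>

#define MASTER_ENABLE	0x1	/* hypothetical "master requests outstanding" bit */

/* Stand-in for reading the device status register; always reports the bit clear. */
static unsigned int read_status(void)
{
	return 0;
}

static int wait_master_disabled(void)
{
	int timeout = 800;	/* retry budget, like the driver's countdown */

	while (timeout) {
		if (!(read_status() & MASTER_ENABLE))
			return 0;		/* bit cleared: outstanding requests drained */
		usleep(100);			/* mirrors the udelay(100) between polls */
		timeout--;
	}
	fprintf(stderr, "master requests still pending\n");
	return -1;
}

int main(void)
{
	return wait_master_disabled() ? 1 : 0;
}
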
@@ -8029,7 +8012,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
             timeout--;
         }
 
-        if(!timeout) {
+        if (!timeout) {
             DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
             return -E1000_ERR_RESET;
         }
@@ -8110,7 +8093,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
 
-    if(!hw->eeprom_semaphore_present)
+    if (!hw->eeprom_semaphore_present)
         return E1000_SUCCESS;
 
     if (hw->mac_type == e1000_80003es2lan) {
@@ -8121,20 +8104,20 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     /* Get the FW semaphore. */
     timeout = hw->eeprom.word_size + 1;
-    while(timeout) {
+    while (timeout) {
         swsm = E1000_READ_REG(hw, SWSM);
         swsm |= E1000_SWSM_SWESMBI;
         E1000_WRITE_REG(hw, SWSM, swsm);
         /* if we managed to set the bit we got the semaphore. */
         swsm = E1000_READ_REG(hw, SWSM);
-        if(swsm & E1000_SWSM_SWESMBI)
+        if (swsm & E1000_SWSM_SWESMBI)
             break;
 
         udelay(50);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         /* Release semaphores */
         e1000_put_hw_eeprom_semaphore(hw);
         DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
@@ -8159,7 +8142,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
 
-    if(!hw->eeprom_semaphore_present)
+    if (!hw->eeprom_semaphore_present)
         return;
 
     swsm = E1000_READ_REG(hw, SWSM);
@@ -8192,16 +8175,16 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
     if (hw->mac_type != e1000_80003es2lan)
         return E1000_SUCCESS;
 
-    while(timeout) {
+    while (timeout) {
         swsm = E1000_READ_REG(hw, SWSM);
         /* If SMBI bit cleared, it is now set and we hold the semaphore */
-        if(!(swsm & E1000_SWSM_SMBI))
+        if (!(swsm & E1000_SWSM_SMBI))
             break;
         msec_delay_irq(1);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
         return -E1000_ERR_RESET;
     }
@@ -8277,7 +8260,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
     case e1000_82573:
     case e1000_80003es2lan:
         fwsm = E1000_READ_REG(hw, FWSM);
-        if((fwsm & E1000_FWSM_MODE_MASK) != 0)
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
             return TRUE;
         break;
     case e1000_ich8lan:
index 375b955..a170e96 100644 (file)
@@ -336,9 +336,9 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 #define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
 
 #define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
-#define E1000_MNG_DHCP_COOKIE_OFFSET   0x6F0   /* Cookie offset */
-#define E1000_MNG_DHCP_COOKIE_LENGTH   0x10    /* Cookie length */
-#define E1000_MNG_IAMT_MODE            0x3
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_IAMT_MODE             0x3
 #define E1000_MNG_ICH_IAMT_MODE         0x2
 #define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
 
@@ -385,7 +385,7 @@ struct e1000_host_mng_dhcp_cookie{
 #endif
 
 int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
-                                                       uint16_t length);
+                                  uint16_t length);
 boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
 boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
 
@@ -470,6 +470,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82571EB_COPPER      0x105E
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
 #define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
 #define E1000_DEV_ID_82572EI_COPPER      0x107D
 #define E1000_DEV_ID_82572EI_FIBER       0x107E
 #define E1000_DEV_ID_82572EI_SERDES      0x107F
@@ -523,7 +524,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 
 
 /* 802.1q VLAN Packet Sizes */
-#define VLAN_TAG_SIZE                     4     /* 802.3ac tag (not DMAed) */
+#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
 
 /* Ethertype field values */
 #define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
@@ -697,6 +698,7 @@ union e1000_rx_desc_packet_split {
     E1000_RXDEXT_STATERR_CXE |            \
     E1000_RXDEXT_STATERR_RXE)
 
+
 /* Transmit Descriptor */
 struct e1000_tx_desc {
     uint64_t buffer_addr;       /* Address of the descriptor's data buffer */
@@ -2086,7 +2088,7 @@ struct e1000_hw {
 #define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
                                                     * filtering */
 #define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
 #define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
 #define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
 #define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
@@ -2172,7 +2174,7 @@ struct e1000_host_command_info {
 
 #define E1000_MDALIGN          4096
 
-/* PCI-Ex registers */
+/* PCI-Ex registers*/
 
 /* PCI-Ex Control Register */
 #define E1000_GCR_RXD_NO_SNOOP          0x00000001
@@ -2224,7 +2226,7 @@ struct e1000_host_command_info {
 #define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erase/write disable */
 
 /* EEPROM Commands - SPI */
-#define EEPROM_MAX_RETRY_SPI    5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
 #define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
 #define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
 #define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
@@ -3082,10 +3084,10 @@ struct e1000_host_command_info {
 
 /* DSP Distance Register (Page 5, Register 26) */
 #define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
-                                                             1 = 50-80M;
-                                                             2 = 80-110M;
-                                                             3 = 110-140M;
-                                                             4 = >140M */
+                                                          1 = 50-80M;
+                                                          2 = 80-110M;
+                                                          3 = 110-140M;
+                                                          4 = >140M */
 
 /* Kumeran Mode Control Register (Page 193, Register 16) */
 #define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
index 98ef9f8..dece183 100644 (file)
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.2.7-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -48,7 +48,6 @@ static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  */
 static struct pci_device_id e1000_pci_tbl[] = {
-       INTEL_E1000_ETHERNET_DEVICE(0x1000),
        INTEL_E1000_ETHERNET_DEVICE(0x1001),
        INTEL_E1000_ETHERNET_DEVICE(0x1004),
        INTEL_E1000_ETHERNET_DEVICE(0x1008),
@@ -99,6 +98,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x1098),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x109A),
+       INTEL_E1000_ETHERNET_DEVICE(0x10A4),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        INTEL_E1000_ETHERNET_DEVICE(0x10B9),
        INTEL_E1000_ETHERNET_DEVICE(0x10BA),
@@ -245,7 +245,7 @@ e1000_init_module(void)
 
        printk(KERN_INFO "%s\n", e1000_copyright);
 
-       ret = pci_module_init(&e1000_driver);
+       ret = pci_register_driver(&e1000_driver);
 
        return ret;
 }
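
This is one of several hunks in the merge (see also eepro100, epic100 and fealnx below) that replace the old pci_module_init() wrapper with a direct pci_register_driver() call. A minimal kernel-module sketch of the registration pattern follows, using an invented "demo" driver name and example IDs rather than anything from e1000.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_DEVICE(0x8086, 0x100e) },	/* example vendor/device pair */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_pci_tbl,
	.probe		= demo_probe,
	.remove		= demo_remove,
};

static int __init demo_init(void)
{
	return pci_register_driver(&demo_driver);	/* replaces pci_module_init() */
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
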
@@ -485,7 +485,7 @@ e1000_up(struct e1000_adapter *adapter)
  *
  **/
 
-static void e1000_power_up_phy(struct e1000_adapter *adapter)
+void e1000_power_up_phy(struct e1000_adapter *adapter)
 {
        uint16_t mii_reg = 0;
 
@@ -682,9 +682,9 @@ e1000_probe(struct pci_dev *pdev,
        unsigned long flash_start, flash_len;
 
        static int cards_found = 0;
-       static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+       static int global_quad_port_a = 0; /* global port A indication */
        int i, err, pci_using_dac;
-       uint16_t eeprom_data;
+       uint16_t eeprom_data = 0;
        uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
        if ((err = pci_enable_device(pdev)))
                return err;
@@ -696,21 +696,20 @@ e1000_probe(struct pci_dev *pdev,
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
-                       return err;
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
 
        if ((err = pci_request_regions(pdev, e1000_driver_name)))
-               return err;
+               goto err_pci_reg;
 
        pci_set_master(pdev);
 
+       err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
-       if (!netdev) {
-               err = -ENOMEM;
+       if (!netdev)
                goto err_alloc_etherdev;
-       }
 
        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -725,11 +724,10 @@ e1000_probe(struct pci_dev *pdev,
        mmio_start = pci_resource_start(pdev, BAR_0);
        mmio_len = pci_resource_len(pdev, BAR_0);
 
+       err = -EIO;
        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-       if (!adapter->hw.hw_addr) {
-               err = -EIO;
+       if (!adapter->hw.hw_addr)
                goto err_ioremap;
-       }
 
        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
@@ -774,6 +772,7 @@ e1000_probe(struct pci_dev *pdev,
        if ((err = e1000_sw_init(adapter)))
                goto err_sw_init;
 
+       err = -EIO;
        /* Flash BAR mapping must happen after e1000_sw_init
         * because it depends on mac_type */
        if ((adapter->hw.mac_type == e1000_ich8lan) &&
@@ -781,24 +780,13 @@ e1000_probe(struct pci_dev *pdev,
                flash_start = pci_resource_start(pdev, 1);
                flash_len = pci_resource_len(pdev, 1);
                adapter->hw.flash_address = ioremap(flash_start, flash_len);
-               if (!adapter->hw.flash_address) {
-                       err = -EIO;
+               if (!adapter->hw.flash_address)
                        goto err_flashmap;
-               }
        }
 
-       if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+       if (e1000_check_phy_reset_block(&adapter->hw))
                DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
-       /* if ksp3, indicate if it's port a being setup */
-       if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
-                       e1000_ksp3_port_a == 0)
-               adapter->ksp3_port_a = 1;
-       e1000_ksp3_port_a++;
-       /* Reset for multiple KP3 adapters */
-       if (e1000_ksp3_port_a == 4)
-               e1000_ksp3_port_a = 0;
-
        if (adapter->hw.mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
@@ -830,7 +818,7 @@ e1000_probe(struct pci_dev *pdev,
 
        if (e1000_init_eeprom_params(&adapter->hw)) {
                E1000_ERR("EEPROM initialization failed\n");
-               return -EIO;
+               goto err_eeprom;
        }
 
        /* before reading the EEPROM, reset the controller to
@@ -842,7 +830,6 @@ e1000_probe(struct pci_dev *pdev,
 
        if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
-               err = -EIO;
                goto err_eeprom;
        }
 
@@ -855,12 +842,9 @@ e1000_probe(struct pci_dev *pdev,
 
        if (!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
-               err = -EIO;
                goto err_eeprom;
        }
 
-       e1000_read_part_num(&adapter->hw, &(adapter->part_num));
-
        e1000_get_bus_info(&adapter->hw);
 
        init_timer(&adapter->tx_fifo_stall_timer);
@@ -921,7 +905,38 @@ e1000_probe(struct pci_dev *pdev,
                break;
        }
        if (eeprom_data & eeprom_apme_mask)
-               adapter->wol |= E1000_WUFC_MAG;
+               adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+       /* now that we have the eeprom settings, apply the special cases
+        * where the eeprom may be wrong or the board simply won't support
+        * wake on lan on a particular port */
+       switch (pdev->device) {
+       case E1000_DEV_ID_82546GB_PCIE:
+               adapter->eeprom_wol = 0;
+               break;
+       case E1000_DEV_ID_82546EB_FIBER:
+       case E1000_DEV_ID_82546GB_FIBER:
+       case E1000_DEV_ID_82571EB_FIBER:
+               /* Wake events only supported on port A for dual fiber
+                * regardless of eeprom setting */
+               if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+                       adapter->eeprom_wol = 0;
+               break;
+       case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+       case E1000_DEV_ID_82571EB_QUAD_COPPER:
+               /* if quad port adapter, disable WoL on all but port A */
+               if (global_quad_port_a != 0)
+                       adapter->eeprom_wol = 0;
+               else
+                       adapter->quad_port_a = 1;
+               /* Reset for multiple quad port adapters */
+               if (++global_quad_port_a == 4)
+                       global_quad_port_a = 0;
+               break;
+       }
+
+       /* initialize the wol settings based on the eeprom settings */
+       adapter->wol = adapter->eeprom_wol;
 
        /* print bus type/speed/width info */
        {
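
The new wake-on-LAN block generalises the old KSP3-only logic: a static counter that persists across probe() calls tracks which port of a quad-port adapter is being brought up, so WoL stays enabled only on port A. A small user-space sketch of that static-counter-across-probes idea follows; the four-port loop and the wol flag are illustrative only.

#include <stdio.h>

/* Disable a per-port feature on all but the first port of a 4-port
 * adapter, using a counter that survives across probe calls. */
static void probe_port(int *wol)
{
	static int global_quad_port_a;

	if (global_quad_port_a != 0)
		*wol = 0;			/* ports B/C/D: force wake-on-LAN off */
	if (++global_quad_port_a == 4)
		global_quad_port_a = 0;		/* wrap so the next adapter starts at port A */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		int wol = 1;			/* pretend the EEPROM enabled WoL everywhere */
		probe_port(&wol);
		printf("port %c: wol=%d\n", 'A' + i, wol);
	}
	return 0;
}
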
@@ -964,16 +979,33 @@ e1000_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
+       e1000_release_hw_control(adapter);
+err_eeprom:
+       if (!e1000_check_phy_reset_block(&adapter->hw))
+               e1000_phy_hw_reset(&adapter->hw);
+
        if (adapter->hw.flash_address)
                iounmap(adapter->hw.flash_address);
 err_flashmap:
+#ifdef CONFIG_E1000_NAPI
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               dev_put(&adapter->polling_netdev[i]);
+#endif
+
+       kfree(adapter->tx_ring);
+       kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+       kfree(adapter->polling_netdev);
+#endif
 err_sw_init:
-err_eeprom:
        iounmap(adapter->hw.hw_addr);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+       pci_disable_device(pdev);
        return err;
 }
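
The reworked error path above follows the usual kernel probe() convention: resources are released through a chain of labels laid out in reverse acquisition order, so each failure point jumps to the label that undoes exactly what had already been set up. A compact sketch of the idiom in plain C, with step()/undo() standing in for the real PCI/ioremap/alloc calls:

#include <stdio.h>
#include <stdlib.h>

static void *step(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);
}

static void undo(const char *what, void *p)
{
	printf("release %s\n", what);
	free(p);
}

static int probe(void)
{
	void *regions = NULL, *netdev = NULL, *mmio = NULL;
	int err = -1;

	regions = step("PCI regions");
	if (!regions)
		goto err_pci_reg;
	netdev = step("netdev");
	if (!netdev)
		goto err_alloc;
	mmio = step("MMIO mapping");
	if (!mmio)
		goto err_ioremap;

	return 0;			/* success: nothing is torn down */

	/* Labels in reverse acquisition order: each failure falls through
	 * and releases only what was already taken. */
err_ioremap:
	undo("netdev", netdev);
err_alloc:
	undo("PCI regions", regions);
err_pci_reg:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
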
 
@@ -1208,7 +1240,7 @@ e1000_open(struct net_device *netdev)
 
        err = e1000_request_irq(adapter);
        if (err)
-               goto err_up;
+               goto err_req_irq;
 
        e1000_power_up_phy(adapter);
 
@@ -1229,6 +1261,9 @@ e1000_open(struct net_device *netdev)
        return E1000_SUCCESS;
 
 err_up:
+       e1000_power_down_phy(adapter);
+       e1000_free_irq(adapter);
+err_req_irq:
        e1000_free_all_rx_resources(adapter);
 err_setup_rx:
        e1000_free_all_tx_resources(adapter);
@@ -1381,10 +1416,6 @@ setup_tx_desc_die:
  *                               (Descriptors) for all queues
  * @adapter: board private structure
  *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
  * Return 0 on success, negative on failure
  **/
 
@@ -1398,6 +1429,9 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "Allocation for Tx Queue %u failed\n", i);
+                       for (i-- ; i >= 0; i--)
+                               e1000_free_tx_resources(adapter,
+                                                       &adapter->tx_ring[i]);
                        break;
                }
        }
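
The added loop changes the contract of e1000_setup_all_tx_resources(): instead of leaving earlier rings allocated for the caller to clean up (the comment removed above), it now rolls back every ring it already built when a later allocation fails. A minimal sketch of that rollback-on-partial-failure pattern, using malloc() in place of the ring setup:

#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES 4

static int setup_all(void **rings)
{
	int i, err = 0;

	for (i = 0; i < NUM_QUEUES; i++) {
		rings[i] = malloc(64);		/* stand-in for one ring allocation */
		if (!rings[i]) {
			err = -1;
			/* Roll back everything allocated so far, newest first,
			 * so the caller never sees a half-built array. */
			for (i--; i >= 0; i--) {
				free(rings[i]);
				rings[i] = NULL;
			}
			break;
		}
	}
	return err;
}

int main(void)
{
	void *rings[NUM_QUEUES] = { NULL };

	return setup_all(rings) ? 1 : 0;
}
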
@@ -1499,8 +1533,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
        } else if (hw->mac_type == e1000_80003es2lan) {
                tarc = E1000_READ_REG(hw, TARC0);
                tarc |= 1;
-               if (hw->media_type == e1000_media_type_internal_serdes)
-                       tarc |= (1 << 20);
                E1000_WRITE_REG(hw, TARC0, tarc);
                tarc = E1000_READ_REG(hw, TARC1);
                tarc |= 1;
@@ -1639,10 +1671,6 @@ setup_rx_desc_die:
  *                               (Descriptors) for all queues
  * @adapter: board private structure
  *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
  * Return 0 on success, negative on failure
  **/
 
@@ -1656,6 +1684,9 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "Allocation for Rx Queue %u failed\n", i);
+                       for (i-- ; i >= 0; i--)
+                               e1000_free_rx_resources(adapter,
+                                                       &adapter->rx_ring[i]);
                        break;
                }
        }
@@ -2442,10 +2473,9 @@ e1000_watchdog(unsigned long data)
                         * disable receives in the ISR and
                         * reset device here in the watchdog
                         */
-                       if (adapter->hw.mac_type == e1000_80003es2lan) {
+                       if (adapter->hw.mac_type == e1000_80003es2lan)
                                /* reset device */
                                schedule_work(&adapter->reset_task);
-                       }
                }
 
                e1000_smartspeed(adapter);
@@ -2545,7 +2575,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb->h.raw - skb->data - 1;
 #ifdef NETIF_F_TSO_IPV6
-               } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
+               } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        skb->nh.ipv6h->payload_len = 0;
                        skb->h.th->check =
                                ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -3680,7 +3710,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                        E1000_DBG("%s: Receive packet consumed multiple"
                                  " buffers\n", netdev->name);
                        /* recycle */
-                       buffer_info-> skb = skb;
+                       buffer_info->skb = skb;
                        goto next_desc;
                }
 
@@ -3711,7 +3741,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                            netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
-                               new_skb->dev = netdev;
                                memcpy(new_skb->data - NET_IP_ALIGN,
                                       skb->data - NET_IP_ALIGN,
                                       length + NET_IP_ALIGN);
@@ -3978,13 +4007,13 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
        buffer_info = &rx_ring->buffer_info[i];
 
        while (cleaned_count--) {
-               if (!(skb = buffer_info->skb))
-                       skb = netdev_alloc_skb(netdev, bufsz);
-               else {
+               skb = buffer_info->skb;
+               if (skb) {
                        skb_trim(skb, 0);
                        goto map_skb;
                }
 
+               skb = netdev_alloc_skb(netdev, bufsz);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
@@ -4009,10 +4038,10 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                dev_kfree_skb(skb);
                                dev_kfree_skb(oldskb);
                                break; /* while !buffer_info->skb */
-                       } else {
-                               /* Use new allocation */
-                               dev_kfree_skb(oldskb);
                        }
+
+                       /* Use new allocation */
+                       dev_kfree_skb(oldskb);
                }
                /* Make buffer alignment 2 beyond a 16 byte boundary
                 * this will result in a 16 byte aligned IP header after
@@ -4020,8 +4049,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                 */
                skb_reserve(skb, NET_IP_ALIGN);
 
-               skb->dev = netdev;
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -4135,8 +4162,6 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                 */
                skb_reserve(skb, NET_IP_ALIGN);
 
-               skb->dev = netdev;
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_ps_bsize0;
                buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -4628,7 +4653,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                e1000_set_multi(netdev);
 
                /* turn on all-multi mode if wake on multicast is enabled */
-               if (adapter->wol & E1000_WUFC_MC) {
+               if (wufc & E1000_WUFC_MC) {
                        rctl = E1000_READ_REG(&adapter->hw, RCTL);
                        rctl |= E1000_RCTL_MPE;
                        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
@@ -4700,11 +4725,14 @@ e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t manc, ret_val;
+       uint32_t manc, err;
 
        pci_set_power_state(pdev, PCI_D0);
        e1000_pci_restore_state(adapter);
-       ret_val = pci_enable_device(pdev);
+       if ((err = pci_enable_device(pdev))) {
+               printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+               return err;
+       }
        pci_set_master(pdev);
 
        pci_enable_wake(pdev, PCI_D3hot, 0);
index 0ef4131..2128427 100644 (file)
@@ -324,7 +324,6 @@ e1000_check_options(struct e1000_adapter *adapter)
                DPRINTK(PROBE, NOTICE,
                       "Warning: no configuration for board #%i\n", bd);
                DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
-               bd = E1000_MAX_NIC;
        }
 
        { /* Transmit Descriptor Count */
@@ -342,9 +341,14 @@ e1000_check_options(struct e1000_adapter *adapter)
                opt.arg.r.max = mac_type < e1000_82544 ?
                        E1000_MAX_TXD : E1000_MAX_82544_TXD;
 
-               tx_ring->count = TxDescriptors[bd];
-               e1000_validate_option(&tx_ring->count, &opt, adapter);
-               E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+               if (num_TxDescriptors > bd) {
+                       tx_ring->count = TxDescriptors[bd];
+                       e1000_validate_option(&tx_ring->count, &opt, adapter);
+                       E1000_ROUNDUP(tx_ring->count,
+                                               REQ_TX_DESCRIPTOR_MULTIPLE);
+               } else {
+                       tx_ring->count = opt.def;
+               }
                for (i = 0; i < adapter->num_tx_queues; i++)
                        tx_ring[i].count = tx_ring->count;
        }
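
Every option in e1000_check_options() now checks num_<Param> > bd before indexing the array, so a per-board value is honoured only if the user actually supplied one for that board index and the compiled-in default is used otherwise. The counts come from module_param_array(); the sketch below shows that mechanism with an invented TxDescriptors-style parameter and should be read as a generic illustration, not the driver's exact E1000_PARAM macro.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_BOARDS	32
#define DEFAULT_TXD	256

static int TxDescriptors[MAX_BOARDS];
static unsigned int num_TxDescriptors;	/* how many array entries the user passed */
module_param_array(TxDescriptors, int, &num_TxDescriptors, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors per board");

/* Use the per-board value only if one was supplied for this board index. */
static int tx_count_for_board(unsigned int bd)
{
	if (num_TxDescriptors > bd)
		return TxDescriptors[bd];
	return DEFAULT_TXD;
}

static int __init demo_param_init(void)
{
	printk(KERN_INFO "board 0 tx count: %d\n", tx_count_for_board(0));
	return 0;
}
module_init(demo_param_init);
MODULE_LICENSE("GPL");
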
@@ -363,9 +367,14 @@ e1000_check_options(struct e1000_adapter *adapter)
                opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
                        E1000_MAX_82544_RXD;
 
-               rx_ring->count = RxDescriptors[bd];
-               e1000_validate_option(&rx_ring->count, &opt, adapter);
-               E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+               if (num_RxDescriptors > bd) {
+                       rx_ring->count = RxDescriptors[bd];
+                       e1000_validate_option(&rx_ring->count, &opt, adapter);
+                       E1000_ROUNDUP(rx_ring->count,
+                                               REQ_RX_DESCRIPTOR_MULTIPLE);
+               } else {
+                       rx_ring->count = opt.def;
+               }
                for (i = 0; i < adapter->num_rx_queues; i++)
                        rx_ring[i].count = rx_ring->count;
        }
@@ -377,9 +386,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                        .def  = OPTION_ENABLED
                };
 
-               int rx_csum = XsumRX[bd];
-               e1000_validate_option(&rx_csum, &opt, adapter);
-               adapter->rx_csum = rx_csum;
+               if (num_XsumRX > bd) {
+                       int rx_csum = XsumRX[bd];
+                       e1000_validate_option(&rx_csum, &opt, adapter);
+                       adapter->rx_csum = rx_csum;
+               } else {
+                       adapter->rx_csum = opt.def;
+               }
        }
        { /* Flow Control */
 
@@ -399,9 +412,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .p = fc_list }}
                };
 
-               int fc = FlowControl[bd];
-               e1000_validate_option(&fc, &opt, adapter);
-               adapter->hw.fc = adapter->hw.original_fc = fc;
+               if (num_FlowControl > bd) {
+                       int fc = FlowControl[bd];
+                       e1000_validate_option(&fc, &opt, adapter);
+                       adapter->hw.fc = adapter->hw.original_fc = fc;
+               } else {
+                       adapter->hw.fc = adapter->hw.original_fc = opt.def;
+               }
        }
        { /* Transmit Interrupt Delay */
                struct e1000_option opt = {
@@ -413,8 +430,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .max = MAX_TXDELAY }}
                };
 
-               adapter->tx_int_delay = TxIntDelay[bd];
-               e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
+               if (num_TxIntDelay > bd) {
+                       adapter->tx_int_delay = TxIntDelay[bd];
+                       e1000_validate_option(&adapter->tx_int_delay, &opt,
+                                             adapter);
+               } else {
+                       adapter->tx_int_delay = opt.def;
+               }
        }
        { /* Transmit Absolute Interrupt Delay */
                struct e1000_option opt = {
@@ -426,9 +448,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .max = MAX_TXABSDELAY }}
                };
 
-               adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
-               e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
-                                     adapter);
+               if (num_TxAbsIntDelay > bd) {
+                       adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+                       e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+                                             adapter);
+               } else {
+                       adapter->tx_abs_int_delay = opt.def;
+               }
        }
        { /* Receive Interrupt Delay */
                struct e1000_option opt = {
@@ -440,8 +466,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .max = MAX_RXDELAY }}
                };
 
-               adapter->rx_int_delay = RxIntDelay[bd];
-               e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
+               if (num_RxIntDelay > bd) {
+                       adapter->rx_int_delay = RxIntDelay[bd];
+                       e1000_validate_option(&adapter->rx_int_delay, &opt,
+                                             adapter);
+               } else {
+                       adapter->rx_int_delay = opt.def;
+               }
        }
        { /* Receive Absolute Interrupt Delay */
                struct e1000_option opt = {
@@ -453,9 +484,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .max = MAX_RXABSDELAY }}
                };
 
-               adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
-               e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
-                                     adapter);
+               if (num_RxAbsIntDelay > bd) {
+                       adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+                       e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+                                             adapter);
+               } else {
+                       adapter->rx_abs_int_delay = opt.def;
+               }
        }
        { /* Interrupt Throttling Rate */
                struct e1000_option opt = {
@@ -467,18 +502,24 @@ e1000_check_options(struct e1000_adapter *adapter)
                                         .max = MAX_ITR }}
                };
 
-               adapter->itr = InterruptThrottleRate[bd];
-               switch (adapter->itr) {
-               case 0:
-                       DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
-                       break;
-               case 1:
-                       DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
-                               opt.name);
-                       break;
-               default:
-                       e1000_validate_option(&adapter->itr, &opt, adapter);
-                       break;
+               if (num_InterruptThrottleRate > bd) {
+                       adapter->itr = InterruptThrottleRate[bd];
+                       switch (adapter->itr) {
+                       case 0:
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
+                                       opt.name);
+                               break;
+                       case 1:
+                               DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+                                       opt.name);
+                               break;
+                       default:
+                               e1000_validate_option(&adapter->itr, &opt,
+                                                     adapter);
+                               break;
+                       }
+               } else {
+                       adapter->itr = opt.def;
                }
        }
        { /* Smart Power Down */
@@ -489,9 +530,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                        .def  = OPTION_DISABLED
                };
 
-               int spd = SmartPowerDownEnable[bd];
-               e1000_validate_option(&spd, &opt, adapter);
-               adapter->smart_power_down = spd;
+               if (num_SmartPowerDownEnable > bd) {
+                       int spd = SmartPowerDownEnable[bd];
+                       e1000_validate_option(&spd, &opt, adapter);
+                       adapter->smart_power_down = spd;
+               } else {
+                       adapter->smart_power_down = opt.def;
+               }
        }
        { /* Kumeran Lock Loss Workaround */
                struct e1000_option opt = {
@@ -501,9 +546,13 @@ e1000_check_options(struct e1000_adapter *adapter)
                        .def  = OPTION_ENABLED
                };
 
+               if (num_KumeranLockLoss > bd) {
                        int kmrn_lock_loss = KumeranLockLoss[bd];
                        e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
                        adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+               } else {
+                       adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+               }
        }
 
        switch (adapter->hw.media_type) {
@@ -530,18 +579,17 @@ static void __devinit
 e1000_check_fiber_options(struct e1000_adapter *adapter)
 {
        int bd = adapter->bd_number;
-       bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
-       if ((Speed[bd] != OPTION_UNSET)) {
+       if (num_Speed > bd) {
                DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
                       "parameter ignored\n");
        }
 
-       if ((Duplex[bd] != OPTION_UNSET)) {
+       if (num_Duplex > bd) {
                DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
                       "parameter ignored\n");
        }
 
-       if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+       if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
                DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
                                 "not valid for fiber adapters, "
                                 "parameter ignored\n");
@@ -560,7 +608,6 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 {
        int speed, dplx, an;
        int bd = adapter->bd_number;
-       bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
 
        { /* Speed */
                struct e1000_opt_list speed_list[] = {{          0, "" },
@@ -577,8 +624,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                                         .p = speed_list }}
                };
 
-               speed = Speed[bd];
-               e1000_validate_option(&speed, &opt, adapter);
+               if (num_Speed > bd) {
+                       speed = Speed[bd];
+                       e1000_validate_option(&speed, &opt, adapter);
+               } else {
+                       speed = opt.def;
+               }
        }
        { /* Duplex */
                struct e1000_opt_list dplx_list[] = {{           0, "" },
@@ -600,11 +651,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                                "Speed/Duplex/AutoNeg parameter ignored.\n");
                        return;
                }
-               dplx = Duplex[bd];
-               e1000_validate_option(&dplx, &opt, adapter);
+               if (num_Duplex > bd) {
+                       dplx = Duplex[bd];
+                       e1000_validate_option(&dplx, &opt, adapter);
+               } else {
+                       dplx = opt.def;
+               }
        }
 
-       if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+       if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
                DPRINTK(PROBE, INFO,
                       "AutoNeg specified along with Speed or Duplex, "
                       "parameter ignored\n");
@@ -653,15 +708,19 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                                         .p = an_list }}
                };
 
-               an = AutoNeg[bd];
-               e1000_validate_option(&an, &opt, adapter);
+               if (num_AutoNeg > bd) {
+                       an = AutoNeg[bd];
+                       e1000_validate_option(&an, &opt, adapter);
+               } else {
+                       an = opt.def;
+               }
                adapter->hw.autoneg_advertised = an;
        }
 
        switch (speed + dplx) {
        case 0:
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
-               if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+               if ((num_Speed > bd) && (speed != 0 || dplx != 0))
                        DPRINTK(PROBE, INFO,
                               "Speed and duplex autonegotiation enabled\n");
                break;
index 8dc61d6..bf9efa7 100644 (file)
@@ -154,7 +154,7 @@ static const char version[] =
 #include <asm/dma.h>
 
 #define DRV_NAME "eepro"
-#define DRV_VERSION "0.13b"
+#define DRV_VERSION "0.13c"
 
 #define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
 /* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
@@ -1333,7 +1333,6 @@ set_multicast_list(struct net_device *dev)
                mode = inb(ioaddr + REG3);
                outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
                eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
-               printk(KERN_INFO "%s: promiscuous mode enabled.\n", dev->name);
        }
 
        else if (dev->mc_count==0 )
index e445988..a3d515d 100644 (file)
@@ -2385,7 +2385,7 @@ static int __init eepro100_init_module(void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init(&eepro100_driver);
+       return pci_register_driver(&eepro100_driver);
 }
 
 static void __exit eepro100_cleanup_module(void)
index a67650c..b885b20 100644 (file)
@@ -26,8 +26,8 @@
 */
 
 #define DRV_NAME        "epic100"
-#define DRV_VERSION     "2.0"
-#define DRV_RELDATE     "June 27, 2006"
+#define DRV_VERSION     "2.1"
+#define DRV_RELDATE     "Sept 11, 2006"
 
 /* The user-configurable values.
    These may be modified when a driver module is loaded.*/
@@ -1386,7 +1386,6 @@ static void set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                outl(0x002C, ioaddr + RxCtrl);
                /* Unconditionally log net taps. */
-               printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
        } else if ((dev->mc_count > 0)  ||  (dev->flags & IFF_ALLMULTI)) {
                /* There is apparently a chip bug, so the multicast filter
@@ -1604,7 +1603,7 @@ static int __init epic_init (void)
                version, version2, version3);
 #endif
 
-       return pci_module_init (&epic_driver);
+       return pci_register_driver(&epic_driver);
 }
 
 
index 567e274..56f81a2 100644 (file)
@@ -25,8 +25,8 @@
 */
 
 #define DRV_NAME       "fealnx"
-#define DRV_VERSION    "2.51"
-#define DRV_RELDATE    "Nov-17-2001"
+#define DRV_VERSION    "2.52"
+#define DRV_RELDATE    "Sep-11-2006"
 
 static int debug;              /* 1-> print debug message */
 static int max_interrupt_work = 20;
@@ -1800,8 +1800,6 @@ static void __set_rx_mode(struct net_device *dev)
        u32 rx_mode;
 
        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
        } else if ((dev->mc_count > multicast_filter_limit)
@@ -1984,7 +1982,7 @@ static int __init fealnx_init(void)
        printk(version);
 #endif
 
-       return pci_module_init(&fealnx_driver);
+       return pci_register_driver(&fealnx_driver);
 }
 
 static void __exit fealnx_exit(void)
index 9b40300..9eedb27 100644 (file)
@@ -2227,8 +2227,6 @@ static void set_multicast_list(struct net_device *dev)
        ep = fep->hwp;
 
        if (dev->flags&IFF_PROMISC) {
-               /* Log any net taps. */
-               printk("%s: Promiscuous mode enabled.\n", dev->name);
                ep->fec_r_cntrl |= 0x0008;
        } else {
 
index 11b8f1b..59f9a51 100644 (file)
  *     0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *     0.55: 22 Mar 2006: Add flow control (pause frame).
  *     0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ *     0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION              "0.56"
+#ifdef CONFIG_FORCEDETH_NAPI
+#define DRIVERNAPI "-NAPI"
+#else
+#define DRIVERNAPI
+#endif
+#define FORCEDETH_VERSION              "0.57"
 #define DRV_NAME                       "forcedeth"
 
 #include <linux/module.h>
@@ -262,7 +268,8 @@ enum {
        NvRegRingSizes = 0x108,
 #define NVREG_RINGSZ_TXSHIFT 0
 #define NVREG_RINGSZ_RXSHIFT 16
-       NvRegUnknownTransmitterReg = 0x10c,
+       NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV        0x00008000
        NvRegLinkSpeed = 0x110,
 #define NVREG_LINKSPEED_FORCE 0x10000
 #define NVREG_LINKSPEED_10     1000
@@ -381,21 +388,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-       u32 PacketBuffer;
-       u32 FlagLen;
+       __le32 buf;
+       __le32 flaglen;
 };
 
 struct ring_desc_ex {
-       u32 PacketBufferHigh;
-       u32 PacketBufferLow;
-       u32 TxVlan;
-       u32 FlagLen;
+       __le32 bufhigh;
+       __le32 buflow;
+       __le32 txvlan;
+       __le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
        struct ring_desc* orig;
        struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
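
Beyond the CamelCase-to-lowercase rename, the descriptor fields switch from u32 to __le32, which lets sparse flag any access that skips the cpu_to_le32()/le32_to_cpu() conversions required by the hardware's little-endian layout. The user-space sketch below shows why the conversion matters when filling such a descriptor; cpu_to_le32_sketch() is a hand-rolled stand-in for the kernel helper, and the flag value is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space analogue of ring_desc: the NIC reads both fields as
 * little-endian 32-bit words, whatever the host byte order is. */
struct ring_desc {
	uint32_t buf;		/* __le32 in the driver */
	uint32_t flaglen;	/* __le32 in the driver */
};

/* Stand-in for cpu_to_le32(): serialise byte by byte so the in-memory
 * layout is little-endian on any host. */
static uint32_t cpu_to_le32_sketch(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;
}

int main(void)
{
	struct ring_desc d;
	uint32_t dma_addr = 0x12345678, len = 1514, avail = 0x80000000u;

	d.buf = cpu_to_le32_sketch(dma_addr);
	d.flaglen = cpu_to_le32_sketch(len | avail);	/* length plus an AVAIL-style flag bit */
	printf("flaglen as stored: 0x%08x\n", d.flaglen);
	return 0;
}
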
@@ -536,6 +543,9 @@ typedef union _ring_type {
 #define PHYID1_OUI_SHFT        6
 #define PHYID2_OUI_MASK        0xfc00
 #define PHYID2_OUI_SHFT        10
+#define PHYID2_MODEL_MASK              0x03f0
+#define PHY_MODEL_MARVELL_E3016                0x220
+#define PHY_MARVELL_E3016_INITMASK     0x0300
 #define PHY_INIT1      0x0f000
 #define PHY_INIT2      0x0e00
 #define PHY_INIT3      0x01000
@@ -653,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-       u32 reg;
-       u32 mask;
+       __le32 reg;
+       __le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -694,6 +704,7 @@ struct fe_priv {
        int phyaddr;
        int wolenabled;
        unsigned int phy_oui;
+       unsigned int phy_model;
        u16 gigabit;
        int intr_test;
 
@@ -707,13 +718,14 @@ struct fe_priv {
        u32 vlanctl_bits;
        u32 driver_data;
        u32 register_size;
+       int rx_csum;
 
        void __iomem *base;
 
        /* rx specific fields.
         * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
         */
-       ring_type rx_ring;
+       union ring_type rx_ring;
        unsigned int cur_rx, refill_rx;
        struct sk_buff **rx_skbuff;
        dma_addr_t *rx_dma;
@@ -733,7 +745,7 @@ struct fe_priv {
        /*
         * tx specific fields.
         */
-       ring_type tx_ring;
+       union ring_type tx_ring;
        unsigned int next_tx, nic_tx;
        struct sk_buff **tx_skbuff;
        dma_addr_t *tx_dma;
@@ -826,13 +838,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-       return le32_to_cpu(prd->FlagLen)
+       return le32_to_cpu(prd->flaglen)
                & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-       return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+       return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +897,7 @@ static void free_rings(struct net_device *dev)
        struct fe_priv *np = get_nvpriv(dev);
 
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-               if(np->rx_ring.orig)
+               if (np->rx_ring.orig)
                        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                                            np->rx_ring.orig, np->ring_addr);
        } else {
@@ -1020,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
        return retval;
 }
 
-static int phy_reset(struct net_device *dev)
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
 {
        struct fe_priv *np = netdev_priv(dev);
        u32 miicontrol;
        unsigned int tries = 0;
 
-       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-       miicontrol |= BMCR_RESET;
+       miicontrol = BMCR_RESET | bmcr_setup;
        if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
                return -1;
        }
@@ -1052,6 +1063,16 @@ static int phy_init(struct net_device *dev)
        u8 __iomem *base = get_hwbase(dev);
        u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
 
+       /* phy errata for E3016 phy */
+       if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+               reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+               reg &= ~PHY_MARVELL_E3016_INITMASK;
+               if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+                       printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+                       return PHY_ERROR;
+               }
+       }
+
        /* set advertise register */
        reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
@@ -1082,8 +1103,13 @@ static int phy_init(struct net_device *dev)
        else
                np->gigabit = 0;
 
-       /* reset the phy */
-       if (phy_reset(dev)) {
+       mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+       mii_control |= BMCR_ANENABLE;
+
+       /* reset the phy
+        * (certain phys need bmcr to be setup with reset)
+        */
+       if (phy_reset(dev, mii_control)) {
                printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
                return PHY_ERROR;
        }
@@ -1178,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev)
                        KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
        udelay(NV_TXSTOP_DELAY2);
-       writel(0, base + NvRegUnknownTransmitterReg);
+       writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
@@ -1258,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev)
                np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
                                        skb->end-skb->data, PCI_DMA_FROMDEVICE);
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                       np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+                       np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
                        wmb();
-                       np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+                       np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
                } else {
-                       np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-                       np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+                       np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+                       np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
                        wmb();
-                       np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+                       np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
                }
                dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
                                        dev->name, refill_rx);
@@ -1277,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev)
        return 0;
 }
 
+/* If rx bufs are exhausted, this is called after 50ms to attempt a refresh */
+#ifdef CONFIG_FORCEDETH_NAPI
+static void nv_do_rx_refill(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *) data;
+
+       /* Just reschedule NAPI rx processing */
+       netif_rx_schedule(dev);
+}
+#else
 static void nv_do_rx_refill(unsigned long data)
 {
        struct net_device *dev = (struct net_device *) data;
@@ -1305,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data)
                enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
        }
 }
+#endif
 
 static void nv_init_rx(struct net_device *dev)
 {
@@ -1315,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev)
        np->refill_rx = 0;
        for (i = 0; i < np->rx_ring_size; i++)
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                       np->rx_ring.orig[i].FlagLen = 0;
+                       np->rx_ring.orig[i].flaglen = 0;
                else
-                       np->rx_ring.ex[i].FlagLen = 0;
+                       np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev)
        np->next_tx = np->nic_tx = 0;
        for (i = 0; i < np->tx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                       np->tx_ring.orig[i].FlagLen = 0;
+                       np->tx_ring.orig[i].flaglen = 0;
                else
-                       np->tx_ring.ex[i].FlagLen = 0;
+                       np->tx_ring.ex[i].flaglen = 0;
                np->tx_skbuff[i] = NULL;
                np->tx_dma[i] = 0;
        }
@@ -1373,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev)
 
        for (i = 0; i < np->tx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                       np->tx_ring.orig[i].FlagLen = 0;
+                       np->tx_ring.orig[i].flaglen = 0;
                else
-                       np->tx_ring.ex[i].FlagLen = 0;
+                       np->tx_ring.ex[i].flaglen = 0;
                if (nv_release_txskb(dev, i))
                        np->stats.tx_dropped++;
        }
@@ -1387,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev)
        int i;
        for (i = 0; i < np->rx_ring_size; i++) {
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                       np->rx_ring.orig[i].FlagLen = 0;
+                       np->rx_ring.orig[i].flaglen = 0;
                else
-                       np->rx_ring.ex[i].FlagLen = 0;
+                       np->rx_ring.ex[i].flaglen = 0;
                wmb();
                if (np->rx_skbuff[i]) {
                        pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                np->tx_dma_len[nr] = bcnt;
 
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                       np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-                       np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                       np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+                       np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                } else {
-                       np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-                       np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-                       np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                       np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+                       np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+                       np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                }
                tx_flags = np->tx_flags;
                offset += bcnt;
                size -= bcnt;
-       } while(size);
+       } while (size);
 
        /* setup the fragments */
        for (i = 0; i < fragments; i++) {
@@ -1477,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        np->tx_dma_len[nr] = bcnt;
 
                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                               np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-                               np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                               np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+                               np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                        } else {
-                               np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-                               np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-                               np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+                               np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+                               np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+                               np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
                        }
                        offset += bcnt;
                        size -= bcnt;
@@ -1491,9 +1528,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* set last fragment flag  */
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-               np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+               np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
        } else {
-               np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+               np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
        }
 
        np->tx_skbuff[nr] = skb;
@@ -1512,10 +1549,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* set tx flags */
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-               np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+               np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
        } else {
-               np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-               np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+               np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+               np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
        }
 
        dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1547,7 +1584,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
-       u32 Flags;
+       u32 flags;
        unsigned int i;
        struct sk_buff *skb;
 
@@ -1555,22 +1592,22 @@ static void nv_tx_done(struct net_device *dev)
                i = np->nic_tx % np->tx_ring_size;
 
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-                       Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+                       flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
                else
-                       Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+                       flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-                                       dev->name, np->nic_tx, Flags);
-               if (Flags & NV_TX_VALID)
+               dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+                                       dev->name, np->nic_tx, flags);
+               if (flags & NV_TX_VALID)
                        break;
                if (np->desc_ver == DESC_VER_1) {
-                       if (Flags & NV_TX_LASTPACKET) {
+                       if (flags & NV_TX_LASTPACKET) {
                                skb = np->tx_skbuff[i];
-                               if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+                               if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
                                             NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-                                       if (Flags & NV_TX_UNDERFLOW)
+                                       if (flags & NV_TX_UNDERFLOW)
                                                np->stats.tx_fifo_errors++;
-                                       if (Flags & NV_TX_CARRIERLOST)
+                                       if (flags & NV_TX_CARRIERLOST)
                                                np->stats.tx_carrier_errors++;
                                        np->stats.tx_errors++;
                                } else {
@@ -1579,13 +1616,13 @@ static void nv_tx_done(struct net_device *dev)
                                }
                        }
                } else {
-                       if (Flags & NV_TX2_LASTPACKET) {
+                       if (flags & NV_TX2_LASTPACKET) {
                                skb = np->tx_skbuff[i];
-                               if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+                               if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
                                             NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-                                       if (Flags & NV_TX2_UNDERFLOW)
+                                       if (flags & NV_TX2_UNDERFLOW)
                                                np->stats.tx_fifo_errors++;
-                                       if (Flags & NV_TX2_CARRIERLOST)
+                                       if (flags & NV_TX2_CARRIERLOST)
                                                np->stats.tx_carrier_errors++;
                                        np->stats.tx_errors++;
                                } else {
@@ -1638,29 +1675,29 @@ static void nv_tx_timeout(struct net_device *dev)
                        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                                printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
                                       i,
-                                      le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-                                      le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-                                      le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-                                      le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-                                      le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-                                      le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-                                      le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-                                      le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+                                      le32_to_cpu(np->tx_ring.orig[i].buf),
+                                      le32_to_cpu(np->tx_ring.orig[i].flaglen),
+                                      le32_to_cpu(np->tx_ring.orig[i+1].buf),
+                                      le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+                                      le32_to_cpu(np->tx_ring.orig[i+2].buf),
+                                      le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+                                      le32_to_cpu(np->tx_ring.orig[i+3].buf),
+                                      le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
                        } else {
                                printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
                                       i,
-                                      le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-                                      le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-                                      le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-                                      le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-                                      le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-                                      le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-                                      le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-                                      le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-                                      le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-                                      le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-                                      le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-                                      le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+                                      le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+                                      le32_to_cpu(np->tx_ring.ex[i].buflow),
+                                      le32_to_cpu(np->tx_ring.ex[i].flaglen),
+                                      le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+                                      le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+                                      le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+                                      le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+                                      le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+                                      le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+                                      le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+                                      le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+                                      le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
                        }
                }
        }
@@ -1697,7 +1734,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
        int protolen;   /* length as stored in the proto field */
 
        /* 1) calculate len according to header */
-       if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+       if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
                protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
                hdrlen = VLAN_HLEN;
        } else {
@@ -1740,13 +1777,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
        }
 }
 
-static void nv_rx_process(struct net_device *dev)
+static int nv_rx_process(struct net_device *dev, int limit)
 {
        struct fe_priv *np = netdev_priv(dev);
-       u32 Flags;
+       u32 flags;
        u32 vlanflags = 0;
+       int count;
 
-       for (;;) {
+       for (count = 0; count < limit; ++count) {
                struct sk_buff *skb;
                int len;
                int i;
@@ -1755,18 +1793,18 @@ static void nv_rx_process(struct net_device *dev)
 
                i = np->cur_rx % np->rx_ring_size;
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                       Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+                       flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
                        len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
                } else {
-                       Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+                       flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
                        len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-                       vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+                       vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
                }
 
-               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-                                       dev->name, np->cur_rx, Flags);
+               dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+                                       dev->name, np->cur_rx, flags);
 
-               if (Flags & NV_RX_AVAIL)
+               if (flags & NV_RX_AVAIL)
                        break;  /* still owned by hardware, */
 
                /*
@@ -1780,7 +1818,7 @@ static void nv_rx_process(struct net_device *dev)
 
                {
                        int j;
-                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
                        for (j=0; j<64; j++) {
                                if ((j%16) == 0)
                                        dprintk("\n%03x:", j);
@@ -1790,30 +1828,30 @@ static void nv_rx_process(struct net_device *dev)
                }
                /* look at what we actually got: */
                if (np->desc_ver == DESC_VER_1) {
-                       if (!(Flags & NV_RX_DESCRIPTORVALID))
+                       if (!(flags & NV_RX_DESCRIPTORVALID))
                                goto next_pkt;
 
-                       if (Flags & NV_RX_ERROR) {
-                               if (Flags & NV_RX_MISSEDFRAME) {
+                       if (flags & NV_RX_ERROR) {
+                               if (flags & NV_RX_MISSEDFRAME) {
                                        np->stats.rx_missed_errors++;
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+                               if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX_CRCERR) {
+                               if (flags & NV_RX_CRCERR) {
                                        np->stats.rx_crc_errors++;
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX_OVERFLOW) {
+                               if (flags & NV_RX_OVERFLOW) {
                                        np->stats.rx_over_errors++;
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX_ERROR4) {
+                               if (flags & NV_RX_ERROR4) {
                                        len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                                        if (len < 0) {
                                                np->stats.rx_errors++;
@@ -1821,32 +1859,32 @@ static void nv_rx_process(struct net_device *dev)
                                        }
                                }
                                /* framing errors are soft errors. */
-                               if (Flags & NV_RX_FRAMINGERR) {
-                                       if (Flags & NV_RX_SUBSTRACT1) {
+                               if (flags & NV_RX_FRAMINGERR) {
+                                       if (flags & NV_RX_SUBSTRACT1) {
                                                len--;
                                        }
                                }
                        }
                } else {
-                       if (!(Flags & NV_RX2_DESCRIPTORVALID))
+                       if (!(flags & NV_RX2_DESCRIPTORVALID))
                                goto next_pkt;
 
-                       if (Flags & NV_RX2_ERROR) {
-                               if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+                       if (flags & NV_RX2_ERROR) {
+                               if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX2_CRCERR) {
+                               if (flags & NV_RX2_CRCERR) {
                                        np->stats.rx_crc_errors++;
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX2_OVERFLOW) {
+                               if (flags & NV_RX2_OVERFLOW) {
                                        np->stats.rx_over_errors++;
                                        np->stats.rx_errors++;
                                        goto next_pkt;
                                }
-                               if (Flags & NV_RX2_ERROR4) {
+                               if (flags & NV_RX2_ERROR4) {
                                        len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
                                        if (len < 0) {
                                                np->stats.rx_errors++;
@@ -1854,17 +1892,17 @@ static void nv_rx_process(struct net_device *dev)
                                        }
                                }
                                /* framing errors are soft errors */
-                               if (Flags & NV_RX2_FRAMINGERR) {
-                                       if (Flags & NV_RX2_SUBSTRACT1) {
+                               if (flags & NV_RX2_FRAMINGERR) {
+                                       if (flags & NV_RX2_SUBSTRACT1) {
                                                len--;
                                        }
                                }
                        }
-                       if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-                               Flags &= NV_RX2_CHECKSUMMASK;
-                               if (Flags == NV_RX2_CHECKSUMOK1 ||
-                                   Flags == NV_RX2_CHECKSUMOK2 ||
-                                   Flags == NV_RX2_CHECKSUMOK3) {
+                       if (np->rx_csum) {
+                               flags &= NV_RX2_CHECKSUMMASK;
+                               if (flags == NV_RX2_CHECKSUMOK1 ||
+                                   flags == NV_RX2_CHECKSUMOK2 ||
+                                   flags == NV_RX2_CHECKSUMOK3) {
                                        dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
                                        np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
                                } else {
@@ -1880,17 +1918,27 @@ static void nv_rx_process(struct net_device *dev)
                skb->protocol = eth_type_trans(skb, dev);
                dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
                                        dev->name, np->cur_rx, len, skb->protocol);
-               if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
-                       vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
-               } else {
+#ifdef CONFIG_FORCEDETH_NAPI
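+               /* NAPI path runs in softirq context, so hand packets up via netif_receive_skb */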
+               if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+                       vlan_hwaccel_receive_skb(skb, np->vlangrp,
+                                                vlanflags & NV_RX3_VLAN_TAG_MASK);
+               else
+                       netif_receive_skb(skb);
+#else
+               if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
+                       vlan_hwaccel_rx(skb, np->vlangrp,
+                                       vlanflags & NV_RX3_VLAN_TAG_MASK);
+               else
                        netif_rx(skb);
-               }
+#endif
                dev->last_rx = jiffies;
                np->stats.rx_packets++;
                np->stats.rx_bytes += len;
 next_pkt:
                np->cur_rx++;
        }
+
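+       /* report how many packets were handled; the NAPI poll accounts this against its budget */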
+       return count;
 }
 
 static void set_bufsize(struct net_device *dev)
@@ -1990,7 +2038,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
        struct fe_priv *np = netdev_priv(dev);
        struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-       if(!is_valid_ether_addr(macaddr->sa_data))
+       if (!is_valid_ether_addr(macaddr->sa_data))
                return -EADDRNOTAVAIL;
 
        /* synchronized against open : rtnl_lock() held by caller */
@@ -2032,7 +2080,6 @@ static void nv_set_multicast(struct net_device *dev)
        memset(mask, 0, sizeof(mask));
 
        if (dev->flags & IFF_PROMISC) {
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                pff |= NVREG_PFF_PROMISC;
        } else {
                pff |= NVREG_PFF_MYADDR;
@@ -2283,20 +2330,20 @@ set_speed:
                        lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
                        switch (adv_pause) {
-                       case (ADVERTISE_PAUSE_CAP):
+                       case ADVERTISE_PAUSE_CAP:
                                if (lpa_pause & LPA_PAUSE_CAP) {
                                        pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
                                        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
                                                pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
                                }
                                break;
-                       case (ADVERTISE_PAUSE_ASYM):
+                       case ADVERTISE_PAUSE_ASYM:
                                if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
                                {
                                        pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
                                }
                                break;
-                       case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+                       case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
                                if (lpa_pause & LPA_PAUSE_CAP)
                                {
                                        pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
@@ -2376,14 +2423,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
                nv_tx_done(dev);
                spin_unlock(&np->lock);
 
-               nv_rx_process(dev);
-               if (nv_alloc_rx(dev)) {
-                       spin_lock(&np->lock);
-                       if (!np->in_shutdown)
-                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                       spin_unlock(&np->lock);
-               }
-
                if (events & NVREG_IRQ_LINK) {
                        spin_lock(&np->lock);
                        nv_link_irq(dev);
@@ -2403,6 +2442,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
                        printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
                                                dev->name, events);
                }
+#ifdef CONFIG_FORCEDETH_NAPI
+               if (events & NVREG_IRQ_RX_ALL) {
+                       netif_rx_schedule(dev);
+
+                       /* Disable further receive irqs */
+                       spin_lock(&np->lock);
+                       np->irqmask &= ~NVREG_IRQ_RX_ALL;
+
+                       if (np->msi_flags & NV_MSI_X_ENABLED)
+                               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+                       else
+                               writel(np->irqmask, base + NvRegIrqMask);
+                       spin_unlock(&np->lock);
+               }
+#else
+               nv_rx_process(dev, dev->weight);
+               if (nv_alloc_rx(dev)) {
+                       spin_lock(&np->lock);
+                       if (!np->in_shutdown)
+                               mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+                       spin_unlock(&np->lock);
+               }
+#endif
                if (i > max_interrupt_work) {
                        spin_lock(&np->lock);
                        /* disable interrupts on the nic */
@@ -2474,6 +2536,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
        return IRQ_RETVAL(i);
 }
 
+#ifdef CONFIG_FORCEDETH_NAPI
+static int nv_napi_poll(struct net_device *dev, int *budget)
+{
+       int pkts, limit = min(*budget, dev->quota);
+       struct fe_priv *np = netdev_priv(dev);
+       u8 __iomem *base = get_hwbase(dev);
+
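+       /* drain at most "limit" packets from the rx ring */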
+       pkts = nv_rx_process(dev, limit);
+
+       if (nv_alloc_rx(dev)) {
+               spin_lock_irq(&np->lock);
+               if (!np->in_shutdown)
+                       mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+               spin_unlock_irq(&np->lock);
+       }
+
+       if (pkts < limit) {
+               /* all done, no more packets present */
+               netif_rx_complete(dev);
+
+               /* re-enable receive interrupts */
+               spin_lock_irq(&np->lock);
+               np->irqmask |= NVREG_IRQ_RX_ALL;
+               if (np->msi_flags & NV_MSI_X_ENABLED)
+                       writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+               else
+                       writel(np->irqmask, base + NvRegIrqMask);
+               spin_unlock_irq(&np->lock);
+               return 0;
+       } else {
+               /* used up our quantum, so reschedule */
+               dev->quota -= pkts;
+               *budget -= pkts;
+               return 1;
+       }
+}
+#endif
+
+#ifdef CONFIG_FORCEDETH_NAPI
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+       struct net_device *dev = (struct net_device *) data;
+       u8 __iomem *base = get_hwbase(dev);
+       u32 events;
+
+       events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+       writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+
+       if (events) {
+               netif_rx_schedule(dev);
+               /* disable receive interrupts on the nic */
+               writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+               pci_push(base);
+       }
+       return IRQ_HANDLED;
+}
+#else
 static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 {
        struct net_device *dev = (struct net_device *) data;
@@ -2492,7 +2611,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
                if (!(events & np->irqmask))
                        break;
 
-               nv_rx_process(dev);
+               nv_rx_process(dev, dev->weight);
                if (nv_alloc_rx(dev)) {
                        spin_lock_irq(&np->lock);
                        if (!np->in_shutdown)
@@ -2514,12 +2633,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
                        spin_unlock_irq(&np->lock);
                        break;
                }
-
        }
        dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
        return IRQ_RETVAL(i);
 }
+#endif
 
 static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 {
@@ -3057,9 +3176,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                if (netif_running(dev))
                        printk(KERN_INFO "%s: link down.\n", dev->name);
                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
-
+               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+                       bmcr |= BMCR_ANENABLE;
+                       /* reset the phy in order for settings to stick,
+                        * and cause autoneg to start */
+                       if (phy_reset(dev, bmcr)) {
+                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+                               return -EINVAL;
+                       }
+               } else {
+                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+               }
        } else {
                int adv, bmcr;
 
@@ -3099,17 +3227,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        bmcr |= BMCR_FULLDPLX;
                if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
                        bmcr |= BMCR_SPEED100;
-               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
                if (np->phy_oui == PHY_OUI_MARVELL) {
-                       /* reset the phy */
-                       if (phy_reset(dev)) {
+                       /* reset the phy in order for forced mode settings to stick */
+                       if (phy_reset(dev, bmcr)) {
                                printk(KERN_INFO "%s: phy reset failed\n", dev->name);
                                return -EINVAL;
                        }
-               } else if (netif_running(dev)) {
-                       /* Wait a bit and then reconfigure the nic. */
-                       udelay(10);
-                       nv_linkchange(dev);
+               } else {
+                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+                       if (netif_running(dev)) {
+                               /* Wait a bit and then reconfigure the nic. */
+                               udelay(10);
+                               nv_linkchange(dev);
+                       }
                }
        }
 
@@ -3166,8 +3296,17 @@ static int nv_nway_reset(struct net_device *dev)
                }
 
                bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-               mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+               if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+                       bmcr |= BMCR_ANENABLE;
+                       /* reset the phy in order for settings to stick */
+                       if (phy_reset(dev, bmcr)) {
+                               printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+                               return -EINVAL;
+                       }
+               } else {
+                       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+                       mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+               }
 
                if (netif_running(dev)) {
                        nv_start_rx(dev);
@@ -3245,7 +3384,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
        if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
                /* fall back to old rings */
                if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-                       if(rxtx_ring)
+                       if (rxtx_ring)
                                pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
                                                    rxtx_ring, ring_addr);
                } else {
@@ -3418,7 +3557,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
 static u32 nv_get_rx_csum(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
-       return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+       return (np->rx_csum) != 0;
 }
 
 static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -3428,22 +3567,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data)
        int retcode = 0;
 
        if (np->driver_data & DEV_HAS_CHECKSUM) {
-
-               if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
-                   (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
-                       /* already set or unset */
-                       return 0;
-               }
-
                if (data) {
+                       np->rx_csum = 1;
                        np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-               } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
-                       np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
                } else {
-                       printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n");
-                       return -EINVAL;
+                       np->rx_csum = 0;
+                       /* vlan is dependent on rx checksum offload */
+                       if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
+                               np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
                }
-
                if (netif_running(dev)) {
                        spin_lock_irq(&np->lock);
                        writel(np->txrxctl_bits, base + NvRegTxRxControl);
@@ -3481,7 +3613,7 @@ static int nv_get_stats_count(struct net_device *dev)
        struct fe_priv *np = netdev_priv(dev);
 
        if (np->driver_data & DEV_HAS_STATISTICS)
-               return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+               return sizeof(struct nv_ethtool_stats)/sizeof(u64);
        else
                return 0;
 }
@@ -3619,7 +3751,7 @@ static int nv_loopback_test(struct net_device *dev)
        struct sk_buff *tx_skb, *rx_skb;
        dma_addr_t test_dma_addr;
        u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-       u32 Flags;
+       u32 flags;
        int len, i, pkt_len;
        u8 *pkt_data;
        u32 filter_flags = 0;
@@ -3663,12 +3795,12 @@ static int nv_loopback_test(struct net_device *dev)
                                       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-               np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-               np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+               np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+               np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
        } else {
-               np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-               np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-               np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+               np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+               np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+               np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
        }
        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
        pci_push(get_hwbase(dev));
@@ -3677,21 +3809,21 @@ static int nv_loopback_test(struct net_device *dev)
 
        /* check for rx of the packet */
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-               Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+               flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
                len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
        } else {
-               Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+               flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
                len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
        }
 
-       if (Flags & NV_RX_AVAIL) {
+       if (flags & NV_RX_AVAIL) {
                ret = 0;
        } else if (np->desc_ver == DESC_VER_1) {
-               if (Flags & NV_RX_ERROR)
+               if (flags & NV_RX_ERROR)
                        ret = 0;
        } else {
-               if (Flags & NV_RX2_ERROR) {
+               if (flags & NV_RX2_ERROR) {
                        ret = 0;
                }
        }
@@ -3753,6 +3885,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
        if (test->flags & ETH_TEST_FL_OFFLINE) {
                if (netif_running(dev)) {
                        netif_stop_queue(dev);
+                       netif_poll_disable(dev);
                        netif_tx_lock_bh(dev);
                        spin_lock_irq(&np->lock);
                        nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3811,6 +3944,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
                        nv_start_rx(dev);
                        nv_start_tx(dev);
                        netif_start_queue(dev);
+                       netif_poll_enable(dev);
                        nv_enable_hw_interrupts(dev, np->irqmask);
                }
        }
@@ -3895,10 +4029,9 @@ static int nv_open(struct net_device *dev)
 
        dprintk(KERN_DEBUG "nv_open: begin\n");
 
-       /* 1) erase previous misconfiguration */
+       /* erase previous misconfiguration */
        if (np->driver_data & DEV_HAS_POWER_CNTRL)
                nv_mac_reset(dev);
-       /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
        writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
        writel(0, base + NvRegMulticastAddrB);
        writel(0, base + NvRegMulticastMaskA);
@@ -3913,26 +4046,22 @@ static int nv_open(struct net_device *dev)
        if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
                writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
 
-       /* 2) initialize descriptor rings */
+       /* initialize descriptor rings */
        set_bufsize(dev);
        oom = nv_init_ring(dev);
 
        writel(0, base + NvRegLinkSpeed);
-       writel(0, base + NvRegUnknownTransmitterReg);
+       writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
        nv_txrx_reset(dev);
        writel(0, base + NvRegUnknownSetupReg6);
 
        np->in_shutdown = 0;
 
-       /* 3) set mac address */
-       nv_copy_mac_to_hw(dev);
-
-       /* 4) give hw rings */
+       /* give hw rings */
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
        writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                base + NvRegRingSizes);
 
-       /* 5) continue setup */
        writel(np->linkspeed, base + NvRegLinkSpeed);
        if (np->desc_ver == DESC_VER_1)
                writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3950,7 +4079,6 @@ static int nv_open(struct net_device *dev)
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 
-       /* 6) continue setup */
        writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
        writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
        writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -4020,6 +4148,8 @@ static int nv_open(struct net_device *dev)
        nv_start_rx(dev);
        nv_start_tx(dev);
        netif_start_queue(dev);
+       netif_poll_enable(dev);
+
        if (ret) {
                netif_carrier_on(dev);
        } else {
@@ -4049,6 +4179,7 @@ static int nv_close(struct net_device *dev)
        spin_lock_irq(&np->lock);
        np->in_shutdown = 1;
        spin_unlock_irq(&np->lock);
+       netif_poll_disable(dev);
        synchronize_irq(dev->irq);
 
        del_timer_sync(&np->oom_kick);
@@ -4076,12 +4207,6 @@ static int nv_close(struct net_device *dev)
        if (np->wolenabled)
                nv_start_rx(dev);
 
-       /* special op: write back the misordered MAC address - otherwise
-        * the next nv_probe would see a wrong address.
-        */
-       writel(np->orig_mac[0], base + NvRegMacAddrA);
-       writel(np->orig_mac[1], base + NvRegMacAddrB);
-
        /* FIXME: power down nic */
 
        return 0;
@@ -4094,7 +4219,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        unsigned long addr;
        u8 __iomem *base;
        int err, i;
-       u32 powerstate;
+       u32 powerstate, txreg;
 
        dev = alloc_etherdev(sizeof(struct fe_priv));
        err = -ENOMEM;
@@ -4190,6 +4315,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                np->pkt_limit = NV_PKTLIMIT_2;
 
        if (id->driver_data & DEV_HAS_CHECKSUM) {
+               np->rx_csum = 1;
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
                dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 #ifdef NETIF_F_TSO
@@ -4269,6 +4395,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        dev->set_multicast_list = nv_set_multicast;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = nv_poll_controller;
+#endif
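+       /* rx packets processed per NAPI poll; also used as the per-pass limit in the irq rx path */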
+       dev->weight = 64;
+#ifdef CONFIG_FORCEDETH_NAPI
+       dev->poll = nv_napi_poll;
 #endif
        SET_ETHTOOL_OPS(dev, &ops);
        dev->tx_timeout = nv_tx_timeout;
@@ -4281,12 +4411,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        np->orig_mac[0] = readl(base + NvRegMacAddrA);
        np->orig_mac[1] = readl(base + NvRegMacAddrB);
 
-       dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
-       dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
-       dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
-       dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
-       dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
-       dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
+       /* check the workaround bit for correct mac address order */
+       txreg = readl(base + NvRegTransmitPoll);
+       if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+               /* mac address is already in correct order */
+               dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
+               dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
+               dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+               dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+               dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
+               dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
+       } else {
+               /* need to reverse mac address to correct order */
+               dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
+               dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
+               dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+               dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+               dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
+               dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
+               /* set permanent address to be correct as well */
+               np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+                       (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+               np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
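+               /* set the workaround bit so subsequent probes see the address in the corrected order */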
+               writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+       }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
        if (!is_valid_ether_addr(dev->perm_addr)) {
@@ -4309,6 +4457,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                        dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
                        dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
 
+       /* set mac address */
+       nv_copy_mac_to_hw(dev);
+
        /* disable WOL */
        writel(0, base + NvRegWakeUpFlags);
        np->wolenabled = 0;
@@ -4369,6 +4520,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                if (id2 < 0 || id2 == 0xffff)
                        continue;
 
+               np->phy_model = id2 & PHYID2_MODEL_MASK;
                id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
                id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
                dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
@@ -4421,9 +4573,17 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
        struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct fe_priv *np = netdev_priv(dev);
+       u8 __iomem *base = get_hwbase(dev);
 
        unregister_netdev(dev);
 
+       /* special op: write back the misordered MAC address - otherwise
+        * the next nv_probe would see a wrong address.
+        */
+       writel(np->orig_mac[0], base + NvRegMacAddrA);
+       writel(np->orig_mac[1], base + NvRegMacAddrB);
+
        /* free all structures */
        free_rings(dev);
        iounmap(get_hwbase(dev));
@@ -4540,7 +4700,7 @@ static struct pci_driver driver = {
 static int __init init_nic(void)
 {
        printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
-       return pci_module_init(&driver);
+       return pci_register_driver(&driver);
 }
 
 static void __exit exit_nic(void)
index ebbbd6c..5130da0 100644 (file)
@@ -1708,9 +1708,6 @@ static void gfar_set_multi(struct net_device *dev)
        u32 tempval;
 
        if(dev->flags & IFF_PROMISC) {
-               if (netif_msg_drv(priv))
-                       printk(KERN_INFO "%s: Entering promiscuous mode.\n",
-                                       dev->name);
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
index 409c6aa..9927bff 100644 (file)
@@ -27,8 +27,8 @@
 */
 
 #define DRV_NAME       "hamachi"
-#define DRV_VERSION    "2.0"
-#define DRV_RELDATE    "June 27, 2006"
+#define DRV_VERSION    "2.1"
+#define DRV_RELDATE    "Sept 11, 2006"
 
 
 /* A few user-configurable values. */
@@ -1851,8 +1851,6 @@ static void set_rx_mode(struct net_device *dev)
        void __iomem *ioaddr = hmp->base;
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                writew(0x000F, ioaddr + AddrMode);
        } else if ((dev->mc_count > 63)  ||  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
index e7d9bf3..ff5a67d 100644 (file)
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/types.h>
-#include <linux/config.h>      /* for CONFIG_PCI */
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
index 68d8af7..fbda761 100644 (file)
@@ -28,7 +28,7 @@
  */
 
 #define IOC3_NAME      "ioc3-eth"
-#define IOC3_VERSION   "2.6.3-3"
+#define IOC3_VERSION   "2.6.3-4"
 
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -1611,8 +1611,6 @@ static void ioc3_set_multicast_list(struct net_device *dev)
        netif_stop_queue(dev);                          /* Lock out others. */
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous.  */
-               /* Unconditionally log net taps.  */
-               printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
                ip->emcr |= EMCR_PROMISC;
                ioc3_w_emcr(ip->emcr);
                (void) ioc3_r_emcr();
index 47f6f64..415ba8d 100644 (file)
@@ -45,7 +45,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
index 0ea65c4..b69776e 100644 (file)
@@ -40,7 +40,6 @@
  ********************************************************************/
 
 #include <linux/module.h>
-#include <linux/config.h> 
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
index 82b67af..a51604b 100644 (file)
@@ -110,9 +110,6 @@ struct ixgb_adapter;
 #define IXGB_RXBUFFER_8192  8192
 #define IXGB_RXBUFFER_16384 16384
 
-/* How many Tx Descriptors do we need to call netif_wake_queue? */
-#define IXGB_TX_QUEUE_WAKE 16
-
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGB_RX_BUFFER_WRITE   4       /* Must be power of 2 */
 
@@ -173,7 +170,7 @@ struct ixgb_adapter {
        unsigned long led_status;
 
        /* TX */
-       struct ixgb_desc_ring tx_ring;
+       struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
        unsigned long timeo_start;
        uint32_t tx_cmd_type;
        uint64_t hw_csum_tx_good;
index cf19b89..ba62108 100644 (file)
@@ -654,11 +654,7 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
 
        mod_timer(&adapter->blink_timer, jiffies);
 
-       if (data)
-               schedule_timeout_interruptible(data * HZ);
-       else
-               schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
-
+       msleep_interruptible(data * 1000);
        del_timer_sync(&adapter->blink_timer);
        ixgb_led_off(&adapter->hw);
        clear_bit(IXGB_LED_ON, &adapter->led_status);
index f7fa10e..2b15155 100644 (file)
@@ -236,6 +236,17 @@ ixgb_identify_phy(struct ixgb_hw *hw)
                DEBUGOUT("Identified G6104 optics\n");
                phy_type = ixgb_phy_type_g6104;
                break;
+       case IXGB_DEVICE_ID_82597EX_CX4:
+               DEBUGOUT("Identified CX4\n");
+               xpak_vendor = ixgb_identify_xpak_vendor(hw);
+               if (xpak_vendor == ixgb_xpak_vendor_intel) {
+                       DEBUGOUT("Identified TXN17201 optics\n");
+                       phy_type = ixgb_phy_type_txn17201;
+               } else {
+                       DEBUGOUT("Identified G6005 optics\n");
+                       phy_type = ixgb_phy_type_g6005;
+               }
+               break;
        default:
                DEBUGOUT("Unknown physical layer module\n");
                phy_type = ixgb_phy_type_unknown;
index 40a085f..9fd6118 100644 (file)
@@ -45,6 +45,7 @@
 
 #define IXGB_DEVICE_ID_82597EX_CX4   0x109E
 #define IXGB_SUBDEVICE_ID_A00C  0xA00C
+#define IXGB_SUBDEVICE_ID_A01C  0xA01C
 
 #endif /* #ifndef _IXGB_IDS_H_ */
 /* End of File */
index 7bbd447..e36dee1 100644 (file)
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION            "1.0.109-k2"DRIVERNAPI
+#define DRV_VERSION            "1.0.112-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -118,15 +118,26 @@ static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 static void ixgb_netpoll(struct net_device *dev);
 #endif
 
-/* Exported from other modules */
+static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+                            enum pci_channel_state state);
+static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
+static void ixgb_io_resume (struct pci_dev *pdev);
 
+/* Exported from other modules */
 extern void ixgb_check_options(struct ixgb_adapter *adapter);
 
+static struct pci_error_handlers ixgb_err_handler = {
+       .error_detected = ixgb_io_error_detected,
+       .slot_reset = ixgb_io_slot_reset,
+       .resume = ixgb_io_resume,
+};
+
 static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = __devexit_p(ixgb_remove),
+       .err_handler = &ixgb_err_handler
 };
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -140,12 +151,12 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_WTHRESH_DEFAULT 16      /* chip writes back at this many or RXT0 */
-#define RXDCTL_PTHRESH_DEFAULT 0               /* chip considers prefech below
-                                                * this */
-#define RXDCTL_HTHRESH_DEFAULT 0               /* chip will only prefetch if tail
-                                                * is pushed this many descriptors
-                                                * from head */
+#define RXDCTL_WTHRESH_DEFAULT 15  /* chip writes back at this many or RXT0 */
+#define RXDCTL_PTHRESH_DEFAULT 0   /* chip considers prefetch below
+                                    * this */
+#define RXDCTL_HTHRESH_DEFAULT 0   /* chip will only prefetch if tail
+                                    * is pushed this many descriptors
+                                    * from head */
 
 /**
  * ixgb_init_module - Driver Registration Routine
@@ -162,7 +173,7 @@ ixgb_init_module(void)
 
        printk(KERN_INFO "%s\n", ixgb_copyright);
 
-       return pci_module_init(&ixgb_driver);
+       return pci_register_driver(&ixgb_driver);
 }
 
 module_init(ixgb_init_module);
@@ -1174,6 +1185,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
        int err;
 
        if (likely(skb_is_gso(skb))) {
+               struct ixgb_buffer *buffer_info;
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
@@ -1196,6 +1208,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 
                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+               buffer_info = &adapter->tx_ring.buffer_info[i];
+               WARN_ON(buffer_info->dma != 0);
 
                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
@@ -1233,11 +1247,14 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
        uint8_t css, cso;
 
        if(likely(skb->ip_summed == CHECKSUM_HW)) {
+               struct ixgb_buffer *buffer_info;
                css = skb->h.raw - skb->data;
                cso = (skb->h.raw + skb->csum) - skb->data;
 
                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+               buffer_info = &adapter->tx_ring.buffer_info[i];
+               WARN_ON(buffer_info->dma != 0);
 
                context_desc->tucss = css;
                context_desc->tucso = cso;
@@ -1283,6 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                buffer_info->length = size;
+               WARN_ON(buffer_info->dma != 0);
                buffer_info->dma =
                        pci_map_single(adapter->pdev,
                                skb->data + offset,
@@ -1543,6 +1561,11 @@ void
 ixgb_update_stats(struct ixgb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+
+       /* Prevent stats update while adapter is being reset */
+       if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+               return;
 
        if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
           (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
@@ -1787,7 +1810,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
        if (unlikely(netif_queue_stopped(netdev))) {
                spin_lock(&adapter->tx_lock);
                if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
-                   (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+                   (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
                        netif_wake_queue(netdev);
                spin_unlock(&adapter->tx_lock);
        }
@@ -1948,10 +1971,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
 #define IXGB_CB_LENGTH 256
                if (length < IXGB_CB_LENGTH) {
                        struct sk_buff *new_skb =
-                           dev_alloc_skb(length + NET_IP_ALIGN);
+                           netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                        if (new_skb) {
                                skb_reserve(new_skb, NET_IP_ALIGN);
-                               new_skb->dev = netdev;
                                memcpy(new_skb->data - NET_IP_ALIGN,
                                       skb->data - NET_IP_ALIGN,
                                       length + NET_IP_ALIGN);
@@ -2031,14 +2053,14 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
        /* leave three descriptors unused */
        while(--cleancount > 2) {
                /* recycle! its good for you */
-               if (!(skb = buffer_info->skb))
-                       skb = dev_alloc_skb(adapter->rx_buffer_len
-                                           + NET_IP_ALIGN);
-               else {
+               skb = buffer_info->skb;
+               if (skb) {
                        skb_trim(skb, 0);
                        goto map_skb;
                }
 
+               skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
+                                      + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
@@ -2051,8 +2073,6 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
                 */
                skb_reserve(skb, NET_IP_ALIGN);
 
-               skb->dev = netdev;
-
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -2190,7 +2210,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 
 static void ixgb_netpoll(struct net_device *dev)
 {
-       struct ixgb_adapter *adapter = dev->priv;
+       struct ixgb_adapter *adapter = netdev_priv(dev);
 
        disable_irq(adapter->pdev->irq);
        ixgb_intr(adapter->pdev->irq, dev, NULL);
@@ -2198,4 +2218,98 @@ static void ixgb_netpoll(struct net_device *dev)
 }
 #endif
 
+/**
+ * ixgb_io_error_detected - called when a PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
+ *
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
+                                    enum pci_channel_state state)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgb_adapter *adapter = netdev->priv;
+
+       if(netif_running(netdev))
+               ixgb_down(adapter, TRUE);
+
+       pci_disable_device(pdev);
+
+       /* Request a slot reset. */
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: pointer to pci device with error
+ *
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the ixgb_probe() routine.
+ */
+static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgb_adapter *adapter = netdev->priv;
+
+       if(pci_enable_device(pdev)) {
+               DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       /* Perform card reset only on one instance of the card */
+       if (0 != PCI_FUNC (pdev->devfn))
+               return PCI_ERS_RESULT_RECOVERED;
+
+       pci_set_master(pdev);
+
+       netif_carrier_off(netdev);
+       netif_stop_queue(netdev);
+       ixgb_reset(adapter);
+
+       /* Make sure the EEPROM is good */
+       if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+               DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+       memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+       if(!is_valid_ether_addr(netdev->perm_addr)) {
+               DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgb_io_resume - called when it is OK to resume normal operations
+ * @pdev: pointer to pci device with error
+ *
+ * The error recovery driver tells us that it is OK to resume
+ * normal operation. The implementation resembles the second half
+ * of the ixgb_probe() routine.
+ */
+static void ixgb_io_resume (struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct ixgb_adapter *adapter = netdev->priv;
+
+       pci_set_master(pdev);
+
+       if(netif_running(netdev)) {
+               if(ixgb_up(adapter)) {
+                       printk ("ixgb: can't bring device back up after reset\n");
+                       return;
+               }
+       }
+
+       netif_device_attach(netdev);
+       mod_timer(&adapter->watchdog_timer, jiffies);
+}
+
 /* ixgb_main.c */
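The three callbacks above implement the PCI error recovery protocol (error_detected, slot_reset, resume). A minimal sketch, not taken from this patch, of how such handlers are typically attached to a PCI driver; the pci_error_handlers field names are the stock kernel API, while the ixgb_pci_tbl/ixgb_probe/ixgb_remove names are assumed from the usual driver layout:

/* Sketch only: wiring the handlers above into the driver registration.
 * The actual ixgb_driver definition is outside the hunks shown here. */
static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset     = ixgb_io_slot_reset,
	.resume         = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name        = "ixgb",
	.id_table    = ixgb_pci_tbl,            /* assumed device table */
	.probe       = ixgb_probe,              /* assumed probe/remove names */
	.remove      = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler,
};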
index 5b4dbfe..dc997be 100644 (file)
@@ -42,7 +42,7 @@
        Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
 */
 
-static const char version[] = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -1281,8 +1281,6 @@ static void set_multicast_list(struct net_device *dev)
        outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
 
        if (dev->flags&IFF_PROMISC) {
-               /* Log any net taps. */
-               printk("%s: Promiscuous mode enabled.\n", dev->name);
                outw(15, ioaddr+LANCE_ADDR);
                outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
        } else {
index 9bdd43a..b19e203 100644 (file)
@@ -187,11 +187,14 @@ struct myri10ge_priv {
        u8 mac_addr[6];         /* eeprom mac address */
        unsigned long serial_number;
        int vendor_specific_offset;
+       int fw_multicast_support;
        u32 devctl;
        u16 msi_flags;
        u32 read_dma;
        u32 write_dma;
        u32 read_write_dma;
+       u32 link_changes;
+       u32 msg_enable;
 };
 
 static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
@@ -257,6 +260,12 @@ module_param(myri10ge_max_irq_loops, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_max_irq_loops,
                 "Set stuck legacy IRQ detection threshold\n");
 
+#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
+
+static int myri10ge_debug = -1;        /* defaults above */
+module_param(myri10ge_debug, int, 0);
+MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+
 #define MYRI10GE_FW_OFFSET 1024*1024
 #define MYRI10GE_HIGHPART_TO_U32(X) \
 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -271,7 +280,7 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
        struct mcp_cmd *buf;
        char buf_bytes[sizeof(*buf) + 8];
        struct mcp_cmd_response *response = mgp->cmd;
-       char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+       char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
        u32 dma_low, dma_high, result, value;
        int sleep_total = 0;
 
@@ -320,6 +329,8 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
                if (result == 0) {
                        data->data0 = value;
                        return 0;
+               } else if (result == MXGEFW_CMD_UNKNOWN) {
+                       return -ENOSYS;
                } else {
                        dev_err(&mgp->pdev->dev,
                                "command %d failed, result = %d\n",
@@ -404,7 +415,7 @@ static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
        buf[4] = htonl(dma_low);        /* dummy addr LSW */
        buf[5] = htonl(enable); /* enable? */
 
-       submit = mgp->sram + 0xfc01c0;
+       submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
 
        myri10ge_pio_copy(submit, &buf, sizeof(buf));
        for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
@@ -600,7 +611,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
        buf[5] = htonl(8);      /* where to copy to */
        buf[6] = htonl(0);      /* where to jump to */
 
-       submit = mgp->sram + 0xfc0000;
+       submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
 
        myri10ge_pio_copy(submit, &buf, sizeof(buf));
        mb();
@@ -764,6 +775,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
        mgp->rx_small.cnt = 0;
        mgp->rx_done.idx = 0;
        mgp->rx_done.cnt = 0;
+       mgp->link_changes = 0;
        status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
        myri10ge_change_promisc(mgp, 0, 0);
        myri10ge_change_pause(mgp, mgp->pause);
@@ -798,12 +810,13 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
  * pages directly and building a fraglist in the near future.
  */
 
-static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
+                                                int bytes)
 {
        struct sk_buff *skb;
        unsigned long data, roundup;
 
-       skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+       skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
        if (skb == NULL)
                return NULL;
 
@@ -821,12 +834,13 @@ static inline struct sk_buff *myri10ge_alloc_big(int bytes)
 
 /* Allocate 2x as much space as required and use whichever portion
  * does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
+                                                       unsigned int bytes)
 {
        struct sk_buff *skb;
        unsigned long data, boundary;
 
-       skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+       skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
        if (unlikely(skb == NULL))
                return NULL;
 
@@ -847,12 +861,13 @@ static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
 
 /* Allocate just enough space, and verify that the allocated
  * space does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
+                                                  int bytes)
 {
        struct sk_buff *skb;
        unsigned long roundup, data, end;
 
-       skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+       skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
        if (unlikely(skb == NULL))
                return NULL;
 
@@ -868,15 +883,17 @@ static inline struct sk_buff *myri10ge_alloc_small(int bytes)
                       "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
                myri10ge_skb_cross_4k = 1;
                dev_kfree_skb_any(skb);
-               skb = myri10ge_alloc_small_safe(bytes);
+               skb = myri10ge_alloc_small_safe(dev, bytes);
        }
        return skb;
 }
 
 static inline int
-myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
-               int idx)
+myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
+               int bytes, int idx)
 {
+       struct net_device *dev = mgp->dev;
+       struct pci_dev *pdev = mgp->pdev;
        struct sk_buff *skb;
        dma_addr_t bus;
        int len, retval = 0;
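The allocation helpers above all guard against the firmware's requirement that a receive buffer must not straddle a 4KB boundary. A minimal sketch of the condition being avoided; the helper name and rounding style are illustrative, not taken from the driver:

/* Illustrative only: nonzero if [data, data + bytes) crosses a 4KB
 * boundary, the case the small/big allocation paths work around. */
static inline int crosses_4k_boundary(unsigned long data, unsigned int bytes)
{
	return (data & ~4095UL) != ((data + bytes - 1) & ~4095UL);
}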
@@ -884,11 +901,11 @@ myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
        bytes += VLAN_HLEN;     /* account for 802.1q vlan tag */
 
        if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
-               skb = myri10ge_alloc_big(bytes);
+               skb = myri10ge_alloc_big(dev, bytes);
        else if (myri10ge_skb_cross_4k)
-               skb = myri10ge_alloc_small_safe(bytes);
+               skb = myri10ge_alloc_small_safe(dev, bytes);
        else
-               skb = myri10ge_alloc_small(bytes);
+               skb = myri10ge_alloc_small(dev, bytes);
 
        if (unlikely(skb == NULL)) {
                rx->alloc_fail++;
@@ -951,7 +968,7 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
        unmap_len = pci_unmap_len(&rx->info[idx], len);
 
        /* try to replace the received skb */
-       if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+       if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
                /* drop the frame -- the old skbuf is re-cycled */
                mgp->stats.rx_dropped += 1;
                return 0;
@@ -968,7 +985,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
        skb_put(skb, len);
 
        skb->protocol = eth_type_trans(skb, mgp->dev);
-       skb->dev = mgp->dev;
        if (mgp->csum_flag) {
                if ((skb->protocol == ntohs(ETH_P_IP)) ||
                    (skb->protocol == ntohs(ETH_P_IPV6))) {
@@ -1081,13 +1097,19 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
                if (mgp->link_state != stats->link_up) {
                        mgp->link_state = stats->link_up;
                        if (mgp->link_state) {
-                               printk(KERN_INFO "myri10ge: %s: link up\n",
-                                      mgp->dev->name);
+                               if (netif_msg_link(mgp))
+                                       printk(KERN_INFO
+                                              "myri10ge: %s: link up\n",
+                                              mgp->dev->name);
                                netif_carrier_on(mgp->dev);
+                               mgp->link_changes++;
                        } else {
-                               printk(KERN_INFO "myri10ge: %s: link down\n",
-                                      mgp->dev->name);
+                               if (netif_msg_link(mgp))
+                                       printk(KERN_INFO
+                                              "myri10ge: %s: link down\n",
+                                              mgp->dev->name);
                                netif_carrier_off(mgp->dev);
+                               mgp->link_changes++;
                        }
                }
                if (mgp->rdma_tags_available !=
@@ -1289,7 +1311,8 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
        "serial_number", "tx_pkt_start", "tx_pkt_done",
        "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
        "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
-       "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+       "link_changes", "link_up", "dropped_link_overflow",
+       "dropped_link_error_or_filtered", "dropped_multicast_filtered",
        "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
        "dropped_no_big_buffer"
 };
@@ -1341,16 +1364,31 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
        data[i++] = (unsigned int)mgp->stop_queue;
        data[i++] = (unsigned int)mgp->watchdog_resets;
        data[i++] = (unsigned int)mgp->tx_linearized;
+       data[i++] = (unsigned int)mgp->link_changes;
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
        data[i++] =
            (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+       data[i++] =
+           (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
        data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
 }
 
+static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
+{
+       struct myri10ge_priv *mgp = netdev_priv(netdev);
+       mgp->msg_enable = value;
+}
+
+static u32 myri10ge_get_msglevel(struct net_device *netdev)
+{
+       struct myri10ge_priv *mgp = netdev_priv(netdev);
+       return mgp->msg_enable;
+}
+
 static struct ethtool_ops myri10ge_ethtool_ops = {
        .get_settings = myri10ge_get_settings,
        .get_drvinfo = myri10ge_get_drvinfo,
@@ -1371,7 +1409,9 @@ static struct ethtool_ops myri10ge_ethtool_ops = {
 #endif
        .get_strings = myri10ge_get_strings,
        .get_stats_count = myri10ge_get_stats_count,
-       .get_ethtool_stats = myri10ge_get_ethtool_stats
+       .get_ethtool_stats = myri10ge_get_ethtool_stats,
+       .set_msglevel = myri10ge_set_msglevel,
+       .get_msglevel = myri10ge_get_msglevel
 };
 
 static int myri10ge_allocate_rings(struct net_device *dev)
@@ -1439,7 +1479,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
        /* Fill the receive rings */
 
        for (i = 0; i <= mgp->rx_small.mask; i++) {
-               status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+               status = myri10ge_getbuf(&mgp->rx_small, mgp,
                                         mgp->small_bytes, i);
                if (status) {
                        printk(KERN_ERR
@@ -1451,8 +1491,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
 
        for (i = 0; i <= mgp->rx_big.mask; i++) {
                status =
-                   myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
-                                   dev->mtu + ETH_HLEN, i);
+                   myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
                if (status) {
                        printk(KERN_ERR
                               "myri10ge: %s: alloced only %d big bufs\n",
@@ -1648,9 +1687,11 @@ static int myri10ge_open(struct net_device *dev)
        }
 
        if (mgp->mtrr >= 0) {
-               mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
-               mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
-               mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+               mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
+               mgp->rx_small.wc_fifo =
+                   (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
+               mgp->rx_big.wc_fifo =
+                   (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
        } else {
                mgp->tx.wc_fifo = NULL;
                mgp->rx_small.wc_fifo = NULL;
@@ -1686,7 +1727,21 @@ static int myri10ge_open(struct net_device *dev)
 
        cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
        cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
-       status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+       cmd.data2 = sizeof(struct mcp_irq_data);
+       status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
+       if (status == -ENOSYS) {
+               dma_addr_t bus = mgp->fw_stats_bus;
+               bus += offsetof(struct mcp_irq_data, send_done_count);
+               cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
+               cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
+               status = myri10ge_send_cmd(mgp,
+                                          MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
+                                          &cmd, 0);
+               /* Firmware cannot support multicast without STATS_DMA_V2 */
+               mgp->fw_multicast_support = 0;
+       } else {
+               mgp->fw_multicast_support = 1;
+       }
        if (status) {
                printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
                       dev->name);
@@ -1841,7 +1896,8 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
        if (cnt > 0) {
                /* pad it to 64 bytes.  The src is 64 bytes bigger than it
                 * needs to be so that we don't overrun it */
-               myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+               myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
+                                 src, 64);
                mb();
        }
 }
@@ -2140,9 +2196,81 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
 
 static void myri10ge_set_multicast_list(struct net_device *dev)
 {
+       struct myri10ge_cmd cmd;
+       struct myri10ge_priv *mgp;
+       struct dev_mc_list *mc_list;
+       int err;
+
+       mgp = netdev_priv(dev);
        /* can be called from atomic contexts,
         * pass 1 to force atomicity in myri10ge_send_cmd() */
-       myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+       myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
+
+       /* This firmware is known to not support multicast */
+       if (!mgp->fw_multicast_support)
+               return;
+
+       /* Disable multicast filtering */
+
+       err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
+       if (err != 0) {
+               printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
+                      " error status: %d\n", dev->name, err);
+               goto abort;
+       }
+
+       if (dev->flags & IFF_ALLMULTI) {
+               /* request to disable multicast filtering, so quit here */
+               return;
+       }
+
+       /* Flush the filters */
+
+       err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+                               &cmd, 1);
+       if (err != 0) {
+               printk(KERN_ERR
+                      "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
+                      ", error status: %d\n", dev->name, err);
+               goto abort;
+       }
+
+       /* Walk the multicast list, and add each address */
+       for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
+               memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
+               memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
+               cmd.data0 = htonl(cmd.data0);
+               cmd.data1 = htonl(cmd.data1);
+               err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
+                                       &cmd, 1);
+
+               if (err != 0) {
+                       printk(KERN_ERR "myri10ge: %s: Failed "
+                              "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
+                              "%d\t", dev->name, err);
+                       printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+                              ((unsigned char *)&mc_list->dmi_addr)[0],
+                              ((unsigned char *)&mc_list->dmi_addr)[1],
+                              ((unsigned char *)&mc_list->dmi_addr)[2],
+                              ((unsigned char *)&mc_list->dmi_addr)[3],
+                              ((unsigned char *)&mc_list->dmi_addr)[4],
+                              ((unsigned char *)&mc_list->dmi_addr)[5]
+                           );
+                       goto abort;
+               }
+       }
+       /* Enable multicast filtering */
+       err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
+       if (err != 0) {
+               printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
+                      "error status: %d\n", dev->name, err);
+               goto abort;
+       }
+
+       return;
+
+abort:
+       return;
 }
 
 static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
@@ -2289,6 +2417,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
  */
 
 #define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE  0x0132
+#define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
+#define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
 
 static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
 {
@@ -2298,15 +2428,34 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
        mgp->fw_name = myri10ge_fw_unaligned;
 
        if (myri10ge_force_firmware == 0) {
+               int link_width, exp_cap;
+               u16 lnk;
+
+               exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
+               pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+               link_width = (lnk >> 4) & 0x3f;
+
                myri10ge_enable_ecrc(mgp);
 
-               /* Check to see if the upstream bridge is known to
-                * provide aligned completions */
-               if (bridge
-                   /* ServerWorks HT2000/HT1000 */
-                   && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
-                   && bridge->device ==
-                   PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+               /* Check to see if Link is less than 8 or if the
+                * upstream bridge is known to provide aligned
+                * completions */
+               if (link_width < 8) {
+                       dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
+                                link_width);
+                       mgp->tx.boundary = 4096;
+                       mgp->fw_name = myri10ge_fw_aligned;
+               } else if (bridge &&
+                          /* ServerWorks HT2000/HT1000 */
+                          ((bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+                            && bridge->device ==
+                            PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE)
+                           /* All Intel E5000 PCIE ports */
+                           || (bridge->vendor == PCI_VENDOR_ID_INTEL
+                               && bridge->device >=
+                               PCI_DEVICE_ID_INTEL_E5000_PCIE23
+                               && bridge->device <=
+                               PCI_DEVICE_ID_INTEL_E5000_PCIE47))) {
                        dev_info(&mgp->pdev->dev,
                                 "Assuming aligned completions (0x%x:0x%x)\n",
                                 bridge->vendor, bridge->device);
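The firmware selection above keys off the negotiated PCI Express link width: bits 9:4 of the PCI_EXP_LNKSTA register, which is what the (lnk >> 4) & 0x3f extraction reads. A self-contained sketch of that query; the guard for a missing PCI Express capability is an addition for illustration and is not in the hunk:

/* Sketch: return the negotiated PCIe link width, or 0 if the device
 * has no PCI Express capability (extra check, not in the patch). */
static int pcie_link_width(struct pci_dev *pdev)
{
	int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 lnksta;

	if (!cap)
		return 0;
	pci_read_config_word(pdev, cap + PCI_EXP_LNKSTA, &lnksta);
	return (lnksta >> 4) & 0x3f;	/* negotiated width, bits 9:4 */
}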
@@ -2581,6 +2730,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
        mgp->pause = myri10ge_flow_control;
        mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+       mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
        init_waitqueue_head(&mgp->down_wq);
 
        if (pci_enable_device(pdev)) {
index 0a6cae6..9519ae7 100644 (file)
@@ -91,7 +91,19 @@ struct mcp_kreq_ether_recv {
 
 /* Commands */
 
-#define MXGEFW_CMD_OFFSET 0xf80000
+#define        MXGEFW_BOOT_HANDOFF     0xfc0000
+#define        MXGEFW_BOOT_DUMMY_RDMA  0xfc01c0
+
+#define        MXGEFW_ETH_CMD          0xf80000
+#define        MXGEFW_ETH_SEND_4       0x200000
+#define        MXGEFW_ETH_SEND_1       0x240000
+#define        MXGEFW_ETH_SEND_2       0x280000
+#define        MXGEFW_ETH_SEND_3       0x2c0000
+#define        MXGEFW_ETH_RECV_SMALL   0x300000
+#define        MXGEFW_ETH_RECV_BIG     0x340000
+
+#define        MXGEFW_ETH_SEND(n)              (0x200000 + (((n) & 0x03) * 0x40000))
+#define        MXGEFW_ETH_SEND_OFFSET(n)       (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
 
 enum myri10ge_mcp_cmd_type {
        MXGEFW_CMD_NONE = 0,
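A quick worked expansion of the send-FIFO macros defined above:

/* MXGEFW_ETH_SEND(0) = 0x200000 + (0 & 3) * 0x40000 = 0x200000 = MXGEFW_ETH_SEND_4
 * MXGEFW_ETH_SEND(1) = 0x200000 + (1 & 3) * 0x40000 = 0x240000 = MXGEFW_ETH_SEND_1
 * MXGEFW_ETH_SEND_OFFSET(2) = 0x280000 - 0x200000 = 0x080000
 * For cnt in 0..3 this matches the hard-coded (cnt << 18) that
 * myri10ge_submit_req_wc() used before this change.
 */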
@@ -154,7 +166,7 @@ enum myri10ge_mcp_cmd_type {
        MXGEFW_CMD_SET_MTU,
        MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET,  /* in microseconds */
        MXGEFW_CMD_SET_STATS_INTERVAL,  /* in microseconds */
-       MXGEFW_CMD_SET_STATS_DMA,
+       MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,      /* replaced by SET_STATS_DMA_V2 */
 
        MXGEFW_ENABLE_PROMISC,
        MXGEFW_DISABLE_PROMISC,
@@ -168,7 +180,26 @@ enum myri10ge_mcp_cmd_type {
         * data2       = RDMA length (MSH), WDMA length (LSH)
         * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
         */
-       MXGEFW_DMA_TEST
+       MXGEFW_DMA_TEST,
+
+       MXGEFW_ENABLE_ALLMULTI,
+       MXGEFW_DISABLE_ALLMULTI,
+
+       /* returns MXGEFW_CMD_ERROR_MULTICAST
+        * if there is no room in the cache
+        * data0,MSH(data1) = multicast group address */
+       MXGEFW_JOIN_MULTICAST_GROUP,
+       /* returns MXGEFW_CMD_ERROR_MULTICAST
+        * if the address is not in the cache,
+        * or is equal to FF-FF-FF-FF-FF-FF
+        * data0,MSH(data1) = multicast group address */
+       MXGEFW_LEAVE_MULTICAST_GROUP,
+       MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+
+       MXGEFW_CMD_SET_STATS_DMA_V2,
+       /* data0, data1 = bus addr,
+        * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
+        * adding new stuff to mcp_irq_data without changing the ABI */
 };
 
 enum myri10ge_mcp_cmd_status {
@@ -180,11 +211,17 @@ enum myri10ge_mcp_cmd_status {
        MXGEFW_CMD_ERROR_CLOSED,
        MXGEFW_CMD_ERROR_HASH_ERROR,
        MXGEFW_CMD_ERROR_BAD_PORT,
-       MXGEFW_CMD_ERROR_RESOURCES
+       MXGEFW_CMD_ERROR_RESOURCES,
+       MXGEFW_CMD_ERROR_MULTICAST
 };
 
-/* 40 Bytes */
+#define MXGEFW_OLD_IRQ_DATA_LEN 40
+
 struct mcp_irq_data {
+       /* add new counters at the beginning */
+       u32 future_use[5];
+       u32 dropped_multicast_filtered;
+       /* 40 Bytes */
        u32 send_done_count;
 
        u32 link_up;
index db0475a..2a46777 100644 (file)
@@ -54,8 +54,8 @@
 #include <asm/uaccess.h>
 
 #define DRV_NAME       "natsemi"
-#define DRV_VERSION    "2.0"
-#define DRV_RELDATE    "June 27, 2006"
+#define DRV_VERSION    "2.1"
+#define DRV_RELDATE    "Sept 11, 2006"
 
 #define RX_OFFSET      2
 
@@ -2387,9 +2387,6 @@ static void __set_rx_mode(struct net_device *dev)
        u32 rx_mode;
 
        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                       dev->name);
                rx_mode = RxFilterEnable | AcceptBroadcast
                        | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
        } else if ((dev->mc_count > multicast_filter_limit)
@@ -3246,7 +3243,7 @@ static int __init natsemi_init_mod (void)
        printk(version);
 #endif
 
-       return pci_module_init (&natsemi_driver);
+       return pci_register_driver(&natsemi_driver);
 }
 
 static void __exit natsemi_exit_mod (void)
index 34bdba9..654b477 100644 (file)
@@ -702,7 +702,7 @@ static int __init ne2k_pci_init(void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init (&ne2k_driver);
+       return pci_register_driver(&ne2k_driver);
 }
 
 
index b1311ae..30ed9a5 100644 (file)
@@ -17,7 +17,6 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
index 0e76859..0dedd34 100644 (file)
@@ -2178,7 +2178,7 @@ static struct pci_driver driver = {
 static int __init ns83820_init(void)
 {
        printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
-       return pci_module_init(&driver);
+       return pci_register_driver(&driver);
 }
 
 static void __exit ns83820_exit(void)
index e0e2939..dea843a 100644 (file)
@@ -98,7 +98,7 @@ IVc. Errata
 #include <linux/crc32.h>
 #include <asm/io.h>
 
-#define NETDRV_VERSION         "1.0.0"
+#define NETDRV_VERSION         "1.0.1"
 #define MODNAME                        "netdrv"
 #define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
 #define PFX                    MODNAME ": "
@@ -1853,9 +1853,6 @@ static void netdrv_set_rx_mode (struct net_device *dev)
 
        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
-               /* Unconditionally log net taps. */
-               printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                       dev->name);
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
@@ -1963,7 +1960,7 @@ static int __init netdrv_init_module (void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init (&netdrv_pci_driver);
+       return pci_register_driver(&netdrv_pci_driver);
 }
 
 
index 297e9f8..c54f6a7 100644 (file)
@@ -771,6 +771,7 @@ static struct pcmcia_device_id axnet_ids[] = {
        PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
        PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
        PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
+       PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202),
        PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
        PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
        PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
@@ -786,8 +787,6 @@ static struct pcmcia_device_id axnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
        PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
        PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6,  0xab9be5ef),
-       /* this is not specific enough */
-       /* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), */
        PCMCIA_DEVICE_NULL,
 };
 MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
index ea93b8f..74211af 100644 (file)
@@ -29,7 +29,7 @@
 ======================================================================*/
 
 #define DRV_NAME       "fmvj18x_cs"
-#define DRV_VERSION    "2.8"
+#define DRV_VERSION    "2.9"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -1193,8 +1193,6 @@ static void set_rx_mode(struct net_device *dev)
        outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
 
     if (dev->flags & IFF_PROMISC) {
-       /* Unconditionally log net taps. */
-       printk("%s: Promiscuous mode enabled.\n", dev->name);
        memset(mc_filter, 0xff, sizeof(mc_filter));
        outb(3, ioaddr + RX_MODE);      /* Enable promiscuous mode */
     } else if (dev->mc_count > MC_FILTERBREAK
index 0ecebfc..cc0dcc9 100644 (file)
@@ -654,11 +654,8 @@ static int pcnet_config(struct pcmcia_device *link)
     SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 
     if (info->flags & (IS_DL10019|IS_DL10022)) {
-       u_char id = inb(dev->base_addr + 0x1a);
        dev->do_ioctl = &ei_ioctl;
        mii_phy_probe(dev);
-       if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
-           info->eth_phy = 0;
     }
 
     link->dev_node = &info->node;
@@ -821,15 +818,6 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
     }
 }
 
-static void mdio_reset(kio_addr_t addr, int phy_id)
-{
-    outb_p(0x08, addr);
-    outb_p(0x0c, addr);
-    outb_p(0x08, addr);
-    outb_p(0x0c, addr);
-    outb_p(0x00, addr);
-}
-
 /*======================================================================
 
     EEPROM access routines for DL10019 and DL10022 based cards
@@ -942,7 +930,8 @@ static void set_misc_reg(struct net_device *dev)
     }
     if (info->flags & IS_DL10022) {
        if (info->flags & HAS_MII) {
-           mdio_reset(nic_base + DLINK_GPIO, info->eth_phy);
+           /* Advertise 100F, 100H, 10F, 10H */
+           mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
            /* Restart MII autonegotiation */
            mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
            mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
index a73d545..3fb369f 100644 (file)
@@ -80,14 +80,14 @@ INT_MODULE_PARM(if_port, 0);
 #ifdef PCMCIA_DEBUG
 INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
 static const char *version =
-"smc91c92_cs.c 0.09 1996/8/4 Donald Becker, becker@scyld.com.\n";
+"smc91c92_cs.c 1.123 2006/11/09 Donald Becker, becker@scyld.com.\n";
 #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
 #else
 #define DEBUG(n, args...)
 #endif
 
 #define DRV_NAME       "smc91c92_cs"
-#define DRV_VERSION    "1.122"
+#define DRV_VERSION    "1.123"
 
 /*====================================================================*/
 
@@ -1780,7 +1780,6 @@ static void set_rx_mode(struct net_device *dev)
     u_short rx_cfg_setting;
 
     if (dev->flags & IFF_PROMISC) {
-       printk(KERN_NOTICE "%s: setting Rx mode to promiscuous.\n", dev->name);
        rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
     } else if (dev->flags & IFF_ALLMULTI)
        rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
index d50bcb8..5e26fe8 100644 (file)
@@ -2978,7 +2978,7 @@ static int __init pcnet32_init_module(void)
                tx_start = tx_start_pt;
 
        /* find the PCI devices */
-       if (!pci_module_init(&pcnet32_driver))
+       if (!pci_register_driver(&pcnet32_driver))
                pcnet32_have_pci = 1;
 
        /* should we find any remaining VLbus devices ? */
index 25e31fb..b1d8ed4 100644 (file)
@@ -14,7 +14,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mii.h>
index ffd215d..792716b 100644 (file)
@@ -12,7 +12,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mii.h>
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644 (file)
index 0000000..c729aee
--- /dev/null
@@ -0,0 +1,3537 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME       "qla3xxx"
+#define DRV_STRING     "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION    "v2.02.00-k36"
+#define PFX            DRV_NAME " "
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;         /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+       {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+       /* required last entry */
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+                           u32 sem_mask, u32 sem_bits)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       u32 value;
+       unsigned int seconds = 3;
+
+       do {
+               writel((sem_mask | sem_bits),
+                      &port_regs->CommonRegs.semaphoreReg);
+               value = readl(&port_regs->CommonRegs.semaphoreReg);
+               if ((value & (sem_mask >> 16)) == sem_bits)
+                       return 0;
+               ssleep(1);
+       } while(--seconds);
+       return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+       readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       u32 value;
+
+       writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+       value = readl(&port_regs->CommonRegs.semaphoreReg);
+       return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
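ql_sem_lock() above relies on a masked-write register layout: the upper 16 bits of the value written select which of the lower 16 semaphore bits may change, and the readback check (value & (sem_mask >> 16)) == sem_bits confirms the request took. A worked example with hypothetical values, not taken from qla3xxx.h:

/* Hypothetical mask/bits, for illustration only:
 *   sem_mask = 0x000F0000   -> low nibble of the register is writable
 *   sem_bits = 0x00000002   -> ownership code this function requests
 * write  (sem_mask | sem_bits) = 0x000F0002
 * check  (readback & (sem_mask >> 16)) = readback & 0x000F
 * the semaphore is held iff that equals sem_bits (0x0002).
 */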
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+       int i = 0;
+
+       while (1) {
+               if (!ql_sem_lock(qdev,
+                                QL_DRVR_SEM_MASK,
+                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+                                 * 2) << 1)) {
+                       if (i < 10) {
+                               ssleep(1);
+                               i++;
+                       } else {
+                               printk(KERN_ERR PFX "%s: Timed out waiting for "
+                                      "driver lock...\n",
+                                      qdev->ndev->name);
+                               return 0;
+                       }
+               } else {
+                       printk(KERN_DEBUG PFX
+                              "%s: driver lock acquired.\n",
+                              qdev->ndev->name);
+                       return 1;
+               }
+       }
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+       writel(((ISP_CONTROL_NP_MASK << 16) | page),
+                       &port_regs->CommonRegs.ispControlStatus);
+       readl(&port_regs->CommonRegs.ispControlStatus);
+       qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
+                             u32 __iomem * reg)
+{
+       u32 value;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       value = readl(reg);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev,
+                             u32 __iomem * reg)
+{
+       return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       u32 value;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev,0);
+       value = readl(reg);
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev,0);
+       return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+                               u32 * reg, u32 value)
+{
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       writel(value, (u32 *) reg);
+       readl(reg);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return;
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+                               u32 * reg, u32 value)
+{
+       writel(value, (u32 *) reg);
+       readl(reg);
+       return;
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+                              u32 * reg, u32 value)
+{
+       if (qdev->current_page != 0)
+               ql_set_register_page(qdev,0);
+       writel(value, (u32 *) reg);
+       readl(reg);
+       return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+                              u32 * reg, u32 value)
+{
+       if (qdev->current_page != 1)
+               ql_set_register_page(qdev,1);
+       writel(value, (u32 *) reg);
+       readl(reg);
+       return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+                              u32 * reg, u32 value)
+{
+       if (qdev->current_page != 2)
+               ql_set_register_page(qdev,2);
+       writel(value, (u32 *) reg);
+       readl(reg);
+       return;
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+       ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+                           (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+       ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+                           ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+                                           struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+       u64 map;
+       lrg_buf_cb->next = NULL;
+
+       if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty  */
+               qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+       } else {
+               qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+               qdev->lrg_buf_free_tail = lrg_buf_cb;
+       }
+
+       if (!lrg_buf_cb->skb) {
+               lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+               if (unlikely(!lrg_buf_cb->skb)) {
+                       printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+                              qdev->ndev->name);
+                       qdev->lrg_buf_skb_check++;
+               } else {
+                       /*
+                        * We save some space to copy the ethhdr from first
+                        * buffer
+                        */
+                       skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+                       map = pci_map_single(qdev->pdev,
+                                            lrg_buf_cb->skb->data,
+                                            qdev->lrg_buffer_len -
+                                            QL_HEADER_SPACE,
+                                            PCI_DMA_FROMDEVICE);
+                       lrg_buf_cb->buf_phy_addr_low =
+                           cpu_to_le32(LS_64BITS(map));
+                       lrg_buf_cb->buf_phy_addr_high =
+                           cpu_to_le32(MS_64BITS(map));
+                       pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                       pci_unmap_len_set(lrg_buf_cb, maplen,
+                                         qdev->lrg_buffer_len -
+                                         QL_HEADER_SPACE);
+               }
+       }
+
+       qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+                                                          *qdev)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+
+       if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
+               if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+                       qdev->lrg_buf_free_tail = NULL;
+               qdev->lrg_buf_free_count--;
+       }
+
+       return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+                           unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+       int i;
+       u32 mask;
+       u32 dataBit;
+       u32 previousBit;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       /* Clock in a zero, then do the start bit */
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1);
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ISP_NVRAM_MASK | qdev->
+                           eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+                           AUBURN_EEPROM_CLK_RISE);
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ISP_NVRAM_MASK | qdev->
+                           eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+                           AUBURN_EEPROM_CLK_FALL);
+
+       mask = 1 << (FM93C56A_CMD_BITS - 1);
+       /* Force the previous data bit to be different */
+       previousBit = 0xffff;
+       for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+               dataBit =
+                   (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+               if (previousBit != dataBit) {
+                       /*
+                        * If the bit changed, then change the DO state to
+                        * match
+                        */
+                       ql_write_common_reg(qdev,
+                                           &port_regs->CommonRegs.
+                                           serialPortInterfaceReg,
+                                           ISP_NVRAM_MASK | qdev->
+                                           eeprom_cmd_data | dataBit);
+                       previousBit = dataBit;
+               }
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->
+                                   eeprom_cmd_data | dataBit |
+                                   AUBURN_EEPROM_CLK_RISE);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->
+                                   eeprom_cmd_data | dataBit |
+                                   AUBURN_EEPROM_CLK_FALL);
+               cmd = cmd << 1;
+       }
+
+       mask = 1 << (addrBits - 1);
+       /* Force the previous data bit to be different */
+       previousBit = 0xffff;
+       for (i = 0; i < addrBits; i++) {
+               dataBit =
+                   (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
+                   AUBURN_EEPROM_DO_0;
+               if (previousBit != dataBit) {
+                       /*
+                        * If the bit changed, then change the DO state to
+                        * match
+                        */
+                       ql_write_common_reg(qdev,
+                                           &port_regs->CommonRegs.
+                                           serialPortInterfaceReg,
+                                           ISP_NVRAM_MASK | qdev->
+                                           eeprom_cmd_data | dataBit);
+                       previousBit = dataBit;
+               }
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->
+                                   eeprom_cmd_data | dataBit |
+                                   AUBURN_EEPROM_CLK_RISE);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->
+                                   eeprom_cmd_data | dataBit |
+                                   AUBURN_EEPROM_CLK_FALL);
+               eepromAddr = eepromAddr << 1;
+       }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+       int i;
+       u32 data = 0;
+       u32 dataBit;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       /* Read the data bits */
+       /* The first bit is a dummy.  Clock right over it. */
+       for (i = 0; i < dataBits; i++) {
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   AUBURN_EEPROM_CLK_RISE);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   serialPortInterfaceReg,
+                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   AUBURN_EEPROM_CLK_FALL);
+               dataBit =
+                   (ql_read_common_reg
+                    (qdev,
+                     &port_regs->CommonRegs.
+                     serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+               data = (data << 1) | dataBit;
+       }
+       *value = (u16) data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+                           u32 eepromAddr, unsigned short *value)
+{
+       fm93c56a_select(qdev);
+       fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+       fm93c56a_datain(qdev, value);
+       fm93c56a_deselect(qdev);
+}
+
+static void ql_swap_mac_addr(u8 * macAddress)
+{
+#ifdef __BIG_ENDIAN
+       u8 temp;
+       temp = macAddress[0];
+       macAddress[0] = macAddress[1];
+       macAddress[1] = temp;
+       temp = macAddress[2];
+       macAddress[2] = macAddress[3];
+       macAddress[3] = temp;
+       temp = macAddress[4];
+       macAddress[4] = macAddress[5];
+       macAddress[5] = temp;
+#endif
+}
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+       u16 *pEEPROMData;
+       u16 checksum = 0;
+       u32 index;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       pEEPROMData = (u16 *) & qdev->nvram_data;
+       qdev->eeprom_cmd_data = 0;
+       if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 10)) {
+               printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
+                       __func__);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return -1;
+       }
+
+       for (index = 0; index < EEPROM_SIZE; index++) {
+               eeprom_readword(qdev, index, pEEPROMData);
+               checksum += *pEEPROMData;
+               pEEPROMData++;
+       }
+       ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+       if (checksum != 0) {
+               printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
+                      qdev->ndev->name, checksum);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return -1;
+       }
+
+       /*
+        * We have a problem with endianness for the MAC addresses
+        * and the two 8-bit values, version and numPorts.  We
+        * have to swap them on big endian systems.
+        */
+       ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
+       ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
+       ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
+       ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
+       pEEPROMData = (u16 *) & qdev->nvram_data.version;
+       *pEEPROMData = le16_to_cpu(*pEEPROMData);
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+       PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
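+/*
+ * Poll until the MII management interface is no longer busy.
+ * Returns 0 when it is free, -1 after roughly 10ms (1000 polls of 10us).
+ */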
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 temp;
+       int count = 1000;
+
+       while (count) {
+               temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+               if (!(temp & MAC_MII_STATUS_BSY))
+                       return 0;
+               udelay(10);
+               count--;
+       }
+       return -1;
+}
+
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 scanControl;
+
+       if (qdev->numPorts > 1) {
+               /* Auto scan will cycle through multiple ports */
+               scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+       } else {
+               scanControl = MAC_MII_CONTROL_SC;
+       }
+
+       /*
+        * Scan register 1 of the PHY/PETBI and set up to scan both
+        * devices.  The autoscan starts from the first register and
+        * completes the last one before rolling over to the first.
+        */
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[0] | MII_SCAN_REGISTER);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (scanControl) |
+                          ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+       u8 ret;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                                       qdev->mem_map_registers;
+
+       /* See if scan mode is enabled before we turn it off */
+       if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+           (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+               /* Scan is enabled */
+               ret = 1;
+       } else {
+               /* Scan is disabled */
+               ret = 0;
+       }
+
+       /*
+        * When disabling scan mode you must first change the MII register
+        * address
+        */
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[0] | MII_SCAN_REGISTER);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+                            MAC_MII_CONTROL_RC) << 16));
+
+       return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+                              u16 regAddr, u16 value, u32 mac_index)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u8 scanWasEnabled;
+
+       scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free before issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[mac_index] | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+       /* Wait for write to complete 9/10/04 SJP */
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free after issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       if (scanWasEnabled)
+               ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+                             u16 * value, u32 mac_index)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u8 scanWasEnabled;
+       u32 temp;
+
+       scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free before issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          PHYAddr[mac_index] | regAddr);
+
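+       /* Clear the read-cycle (RC) bit, then set it to start the read. */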
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16));
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+       /* Wait for the read to complete */
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free after issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+       *value = (u16) temp;
+
+       if (scanWasEnabled)
+               ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free before issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          qdev->PHYAddr | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+       /* Wait for write to complete. */
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free after issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+       u32 temp;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       ql_mii_disable_scan_mode(qdev);
+
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free before issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+                          qdev->PHYAddr | regAddr);
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16));
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+       /* Wait for the read to complete */
+       if (ql_wait_for_mii_ready(qdev)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Timed out waiting for management port to "
+                              "get free after issuing command.\n",
+                              qdev->ndev->name);
+               return -1;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+       *value = (u16) temp;
+
+       ql_mii_enable_scan_mode(qdev);
+
+       return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+       ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       /* Enable Auto-negotiation sense */
+       ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+       reg |= PETBI_TBI_AUTO_SENSE;
+       ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+       ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+                        PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+       ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+                        PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+                        PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+                           mac_index);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       u16 reg;
+
+       /* Enable Auto-negotiation sense */
+       ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
+       reg |= PETBI_TBI_AUTO_SENSE;
+       ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
+
+       ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+                           PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
+
+       ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+                           PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+                           PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+                           mac_index);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+       ql_petbi_reset(qdev);
+       ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       ql_petbi_reset_ex(qdev, mac_index);
+       ql_petbi_start_neg_ex(qdev, mac_index);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+               return 0;
+
+       return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+               return 0;
+
+       reg = (((reg & 0x18) >> 3) & 3);
+
+       if (reg == 2)
+               return SPEED_1000;
+       else if (reg == 1)
+               return SPEED_100;
+       else if (reg == 0)
+               return SPEED_10;
+       else
+               return -1;
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+               return 0;
+
+       return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+       u16 reg;
+
+       if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+               return 0;
+
+       return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
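+       /* The upper 16 bits of the config register act as a write mask. */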
+       if (enable)
+               value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+       else
+               value = (MAC_CONFIG_REG_PE << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+       else
+               value = (MAC_CONFIG_REG_SR << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+       else
+               value = (MAC_CONFIG_REG_GM << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+       else
+               value = (MAC_CONFIG_REG_FD << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 value;
+
+       if (enable)
+               value =
+                   ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+       else
+               value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+       if (qdev->mac_index)
+               ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+       else
+               ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_SM0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_SM1;
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+       u16 reg;
+       ql_mii_read_reg(qdev, 0x00, &reg);
+       return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_AC0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_AC1;
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_INFO PFX
+                              "%s: Auto-Negotiate complete.\n",
+                              qdev->ndev->name);
+               return 1;
+       } else {
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Auto-Negotiate incomplete.\n",
+                              qdev->ndev->name);
+               return 0;
+       }
+}
+
+/*
+ *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return ql_is_petbi_neg_pause(qdev);
+       else
+               return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_AE0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_AE1;
+               break;
+       }
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return SPEED_1000;
+       else
+               return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+       if (ql_is_fiber(qdev))
+               return 1;
+       else
+               return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = ISP_CONTROL_LINK_DN_0;
+               break;
+       case 1:
+               bitToCheck = ISP_CONTROL_LINK_DN_1;
+               break;
+       }
+
+       temp =
+           ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+       return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       switch (qdev->mac_index) {
+       case 0:
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.ispControlStatus,
+                                   (ISP_CONTROL_LINK_DN_0) |
+                                   (ISP_CONTROL_LINK_DN_0 << 16));
+               break;
+
+       case 1:
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.ispControlStatus,
+                                   (ISP_CONTROL_LINK_DN_1) |
+                                   (ISP_CONTROL_LINK_DN_1 << 16));
+               break;
+
+       default:
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
+                                        u32 mac_index)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp;
+
+       switch (mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_F1_ENABLED;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_F3_ENABLED;
+               break;
+       default:
+               break;
+       }
+
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_DEBUG PFX
+                              "%s: is not link master.\n", qdev->ndev->name);
+               return 0;
+       } else {
+               if (netif_msg_link(qdev))
+                       printk(KERN_DEBUG PFX
+                              "%s: is link master.\n", qdev->ndev->name);
+               return 1;
+       }
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       u16 reg;
+
+       ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
+                           PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
+
+       ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
+       ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
+                           mac_index);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
+{
+       ql_phy_reset_ex(qdev, mac_index);
+       ql_phy_start_neg_ex(qdev, mac_index);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       u32 bitToCheck = 0;
+       u32 temp, linkState;
+
+       switch (qdev->mac_index) {
+       case 0:
+               bitToCheck = PORT_STATUS_UP0;
+               break;
+       case 1:
+               bitToCheck = PORT_STATUS_UP1;
+               break;
+       }
+       temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if (temp & bitToCheck) {
+               linkState = LS_UP;
+       } else {
+               linkState = LS_DOWN;
+               if (netif_msg_link(qdev))
+                       printk(KERN_WARNING PFX
+                              "%s: Link is down.\n", qdev->ndev->name);
+       }
+       return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7))
+               return -1;
+
+       if (ql_is_fiber(qdev)) {
+               ql_petbi_init(qdev);
+       } else {
+               /* Copper port */
+               ql_phy_init_ex(qdev, qdev->mac_index);
+       }
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7))
+               return -1;
+
+       if (!ql_auto_neg_error(qdev)) {
+               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+                       /* configure the MAC */
+                       if (netif_msg_link(qdev))
+                               printk(KERN_DEBUG PFX
+                                      "%s: Configuring link.\n",
+                                      qdev->ndev->
+                                      name);
+                       ql_mac_cfg_soft_reset(qdev, 1);
+                       ql_mac_cfg_gig(qdev,
+                                      (ql_get_link_speed
+                                       (qdev) ==
+                                       SPEED_1000));
+                       ql_mac_cfg_full_dup(qdev,
+                                           ql_is_link_full_dup
+                                           (qdev));
+                       ql_mac_cfg_pause(qdev,
+                                        ql_is_neg_pause
+                                        (qdev));
+                       ql_mac_cfg_soft_reset(qdev, 0);
+
+                       /* enable the MAC */
+                       if (netif_msg_link(qdev))
+                               printk(KERN_DEBUG PFX
+                                      "%s: Enabling mac.\n",
+                                      qdev->ndev->
+                                              name);
+                       ql_mac_enable(qdev, 1);
+               }
+
+               if (netif_msg_link(qdev))
+                       printk(KERN_DEBUG PFX
+                              "%s: Change port_link_state LS_DOWN to LS_UP.\n",
+                              qdev->ndev->name);
+               qdev->port_link_state = LS_UP;
+               netif_start_queue(qdev->ndev);
+               netif_carrier_on(qdev->ndev);
+               if (netif_msg_link(qdev))
+                       printk(KERN_INFO PFX
+                              "%s: Link is up at %d Mbps, %s duplex.\n",
+                              qdev->ndev->name,
+                              ql_get_link_speed(qdev),
+                              ql_is_link_full_dup(qdev)
+                              ? "full" : "half");
+
+       } else {        /* Remote error detected */
+
+               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+                       if (netif_msg_link(qdev))
+                               printk(KERN_DEBUG PFX
+                                      "%s: Remote error detected. "
+                                      "Calling ql_port_start().\n",
+                                      qdev->ndev->
+                                      name);
+                       /*
+                        * ql_port_start() is shared code and needs
+                        * to lock the PHY on its own.
+                        */
+                       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+                       if(ql_port_start(qdev)) {/* Restart port */
+                               return -1;
+                       } else
+                               return 0;
+               }
+       }
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+static void ql_link_state_machine(struct ql3_adapter *qdev)
+{
+       u32 curr_link_state;
+       unsigned long hw_flags;
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       curr_link_state = ql_get_link_state(qdev);
+
+       if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
+               if (netif_msg_link(qdev))
+                       printk(KERN_INFO PFX
+                              "%s: Reset in progress, skip processing link "
+                              "state.\n", qdev->ndev->name);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return;
+       }
+
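+       /*
+        * Two-state machine: bring the port up once autonegotiation
+        * completes, drop it back down when link loss is detected.
+        */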
+       switch (qdev->port_link_state) {
+       default:
+               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+                       ql_port_start(qdev);
+               }
+               qdev->port_link_state = LS_DOWN;
+               /* Fall Through */
+
+       case LS_DOWN:
+               if (netif_msg_link(qdev))
+                       printk(KERN_DEBUG PFX
+                              "%s: port_link_state = LS_DOWN.\n",
+                              qdev->ndev->name);
+               if (curr_link_state == LS_UP) {
+                       if (netif_msg_link(qdev))
+                               printk(KERN_DEBUG PFX
+                                      "%s: curr_link_state = LS_UP.\n",
+                                      qdev->ndev->name);
+                       if (ql_is_auto_neg_complete(qdev))
+                               ql_finish_auto_neg(qdev);
+
+                       if (qdev->port_link_state == LS_UP)
+                               ql_link_down_detect_clear(qdev);
+
+               }
+               break;
+
+       case LS_UP:
+               /*
+                * See if the link is currently down or went down and came
+                * back up
+                */
+               if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
+                       if (netif_msg_link(qdev))
+                               printk(KERN_INFO PFX "%s: Link is down.\n",
+                                      qdev->ndev->name);
+                       qdev->port_link_state = LS_DOWN;
+               }
+               break;
+       }
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+       if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+               set_bit(QL_LINK_MASTER,&qdev->flags);
+       else
+               clear_bit(QL_LINK_MASTER,&qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+       ql_mii_enable_scan_mode(qdev);
+
+       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+               if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+                       ql_petbi_init_ex(qdev, qdev->mac_index);
+       } else {
+               if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
+                       ql_phy_init_ex(qdev, qdev->mac_index);
+       }
+}
+
+/*
+ * MII_Setup needs to be called before taking the PHY out of reset so that the
+ * management interface clock speed can be set properly.  It would be better if
+ * we had a way to disable MDC until after the PHY is out of reset, but we
+ * don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+       u32 reg;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7))
+               return -1;
+
+       /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+       reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+       ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+                          reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       return 0;
+}
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+       u32 supported;
+
+       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+               supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
+                   | SUPPORTED_Autoneg;
+       } else {
+               supported = SUPPORTED_10baseT_Half
+                   | SUPPORTED_10baseT_Full
+                   | SUPPORTED_100baseT_Half
+                   | SUPPORTED_100baseT_Full
+                   | SUPPORTED_1000baseT_Half
+                   | SUPPORTED_1000baseT_Full
+                   | SUPPORTED_Autoneg | SUPPORTED_TP;
+       }
+
+       return supported;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+       int status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_is_auto_cfg(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+       u32 status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_get_link_speed(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+       int status;
+       unsigned long hw_flags;
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               return 0;
+       }
+       status = ql_is_link_full_dup(qdev);
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       return status;
+}
+
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       ecmd->transceiver = XCVR_INTERNAL;
+       ecmd->supported = ql_supported_modes(qdev);
+
+       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+               ecmd->port = PORT_FIBRE;
+       } else {
+               ecmd->port = PORT_TP;
+               ecmd->phy_address = qdev->PHYAddr;
+       }
+       ecmd->advertising = ql_supported_modes(qdev);
+       ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+       ecmd->speed = ql_get_speed(qdev);
+       ecmd->duplex = ql_get_full_dup(qdev);
+       return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+                          struct ethtool_drvinfo *drvinfo)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
+       strncpy(drvinfo->version, ql3xxx_driver_version, 32);
+       strncpy(drvinfo->fw_version, "N/A", 32);
+       strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+       drvinfo->n_stats = 0;
+       drvinfo->testinfo_len = 0;
+       drvinfo->regdump_len = 0;
+       drvinfo->eedump_len = 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       qdev->msg_enable = value;
+}
+
+static struct ethtool_ops ql3xxx_ethtool_ops = {
+       .get_settings = ql_get_settings,
+       .get_drvinfo = ql_get_drvinfo,
+       .get_perm_addr = ethtool_op_get_perm_addr,
+       .get_link = ethtool_op_get_link,
+       .get_msglevel = ql_get_msglevel,
+       .set_msglevel = ql_set_msglevel,
+};
+
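+/*
+ * Attach a fresh skb to every large-buffer control block that is missing
+ * one.  Returns 1 once all outstanding buffers have been replenished,
+ * 0 if some are still missing (e.g. on allocation failure).
+ */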
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+       struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+       u64 map;
+
+       while (lrg_buf_cb) {
+               if (!lrg_buf_cb->skb) {
+                       lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+                       if (unlikely(!lrg_buf_cb->skb)) {
+                               printk(KERN_DEBUG PFX
+                                      "%s: Failed dev_alloc_skb().\n",
+                                      qdev->ndev->name);
+                               break;
+                       } else {
+                               /*
+                                * Reserve headroom so the ethhdr from the
+                                * first buffer can be copied in front later.
+                                */
+                               skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+                               map = pci_map_single(qdev->pdev,
+                                                    lrg_buf_cb->skb->data,
+                                                    qdev->lrg_buffer_len -
+                                                    QL_HEADER_SPACE,
+                                                    PCI_DMA_FROMDEVICE);
+                               lrg_buf_cb->buf_phy_addr_low =
+                                   cpu_to_le32(LS_64BITS(map));
+                               lrg_buf_cb->buf_phy_addr_high =
+                                   cpu_to_le32(MS_64BITS(map));
+                               pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                               pci_unmap_len_set(lrg_buf_cb, maplen,
+                                                 qdev->lrg_buffer_len -
+                                                 QL_HEADER_SPACE);
+                               --qdev->lrg_buf_skb_check;
+                               if (!qdev->lrg_buf_skb_check)
+                                       return 1;
+                       }
+               }
+               lrg_buf_cb = lrg_buf_cb->next;
+       }
+       return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+       struct bufq_addr_element *lrg_buf_q_ele;
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+       if ((qdev->lrg_buf_free_count >= 8)
+           && (qdev->lrg_buf_release_cnt >= 16)) {
+
+               if (qdev->lrg_buf_skb_check)
+                       if (!ql_populate_free_queue(qdev))
+                               return;
+
+               lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
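+               /*
+                * Post addresses in groups of eight, bumping the producer
+                * index once per group.
+                */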
+               while ((qdev->lrg_buf_release_cnt >= 16)
+                      && (qdev->lrg_buf_free_count >= 8)) {
+
+                       for (i = 0; i < 8; i++) {
+                               lrg_buf_cb =
+                                   ql_get_from_lrg_buf_free_list(qdev);
+                               lrg_buf_q_ele->addr_high =
+                                   lrg_buf_cb->buf_phy_addr_high;
+                               lrg_buf_q_ele->addr_low =
+                                   lrg_buf_cb->buf_phy_addr_low;
+                               lrg_buf_q_ele++;
+
+                               qdev->lrg_buf_release_cnt--;
+                       }
+
+                       qdev->lrg_buf_q_producer_index++;
+
+                       if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
+                               qdev->lrg_buf_q_producer_index = 0;
+
+                       if (qdev->lrg_buf_q_producer_index ==
+                           (NUM_LBUFQ_ENTRIES - 1)) {
+                               lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+                       }
+               }
+
+               qdev->lrg_buf_next_free = lrg_buf_q_ele;
+
+               ql_write_common_reg(qdev,
+                                   (u32 *) & port_regs->CommonRegs.
+                                   rxLargeQProducerIndex,
+                                   qdev->lrg_buf_q_producer_index);
+       }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+                                  struct ob_mac_iocb_rsp *mac_rsp)
+{
+       struct ql_tx_buf_cb *tx_cb;
+
+       tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+       pci_unmap_single(qdev->pdev,
+                        pci_unmap_addr(tx_cb, mapaddr),
+                        pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+       dev_kfree_skb_irq(tx_cb->skb);
+       qdev->stats.tx_packets++;
+       qdev->stats.tx_bytes += tx_cb->skb->len;
+       tx_cb->skb = NULL;
+       atomic_inc(&qdev->tx_count);
+}
+
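+/*
+ * MAC receive completion.  Each frame consumes two large buffers: the
+ * first is returned to the free list untouched, the second holds the
+ * frame and is passed up the stack.
+ */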
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+       long int offset;
+       u32 lrg_buf_phy_addr_low = 0;
+       struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+       struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+       u32 *curr_ial_ptr;
+       struct sk_buff *skb;
+       u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+       /*
+        * Get the inbound address list (small buffer).
+        */
+       offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
+       if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+               qdev->small_buf_index = 0;
+
+       curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
+       qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
+       qdev->small_buf_release_cnt++;
+
+       /* start of first buffer */
+       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+       lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+       qdev->lrg_buf_release_cnt++;
+       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+               qdev->lrg_buf_index = 0;
+       curr_ial_ptr++;         /* 64-bit pointers require two incs. */
+       curr_ial_ptr++;
+
+       /* start of second buffer */
+       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+       lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+
+       /*
+        * Second buffer gets sent up the stack.
+        */
+       qdev->lrg_buf_release_cnt++;
+       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+               qdev->lrg_buf_index = 0;
+       skb = lrg_buf_cb2->skb;
+
+       qdev->stats.rx_packets++;
+       qdev->stats.rx_bytes += length;
+
+       skb_put(skb, length);
+       pci_unmap_single(qdev->pdev,
+                        pci_unmap_addr(lrg_buf_cb2, mapaddr),
+                        pci_unmap_len(lrg_buf_cb2, maplen),
+                        PCI_DMA_FROMDEVICE);
+       prefetch(skb->data);
+       skb->dev = qdev->ndev;
+       skb->ip_summed = CHECKSUM_NONE;
+       skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+       netif_receive_skb(skb);
+       qdev->ndev->last_rx = jiffies;
+       lrg_buf_cb2->skb = NULL;
+
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
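+/*
+ * IP receive completion.  The Ethernet (or VLAN) header arrives in the
+ * first large buffer and is copied in front of the payload held in the
+ * second buffer before the skb is handed to the stack.
+ */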
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+                                    struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+       long int offset;
+       u32 lrg_buf_phy_addr_low = 0;
+       struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+       struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+       u32 *curr_ial_ptr;
+       struct sk_buff *skb1, *skb2;
+       struct net_device *ndev = qdev->ndev;
+       u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+       u16 size = 0;
+
+       /*
+        * Get the inbound address list (small buffer).
+        */
+
+       offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
+       if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+               qdev->small_buf_index = 0;
+       curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
+       qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
+       qdev->small_buf_release_cnt++;
+
+       /* start of first buffer */
+       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+       lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+
+       qdev->lrg_buf_release_cnt++;
+       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+               qdev->lrg_buf_index = 0;
+       skb1 = lrg_buf_cb1->skb;
+       curr_ial_ptr++;         /* 64-bit pointers require two incs. */
+       curr_ial_ptr++;
+
+       /* start of second buffer */
+       lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+       lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+       skb2 = lrg_buf_cb2->skb;
+       qdev->lrg_buf_release_cnt++;
+       if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+               qdev->lrg_buf_index = 0;
+
+       qdev->stats.rx_packets++;
+       qdev->stats.rx_bytes += length;
+
+       /*
+        * Copy the ethhdr from the first buffer to the second.  This
+        * is necessary for IP completions.
+        */
+       if (*((u16 *) skb1->data) != 0xFFFF)
+               size = VLAN_ETH_HLEN;
+       else
+               size = ETH_HLEN;
+
+       skb_put(skb2, length);  /* Just the second buffer length here. */
+       pci_unmap_single(qdev->pdev,
+                        pci_unmap_addr(lrg_buf_cb2, mapaddr),
+                        pci_unmap_len(lrg_buf_cb2, maplen),
+                        PCI_DMA_FROMDEVICE);
+       prefetch(skb2->data);
+
+       memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+       skb2->dev = qdev->ndev;
+       skb2->ip_summed = CHECKSUM_NONE;
+       skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+       netif_receive_skb(skb2);
+       ndev->last_rx = jiffies;
+       lrg_buf_cb2->skb = NULL;
+
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+       ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
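+/*
+ * Service the response queue: reclaim completed transmits, pass received
+ * frames to the stack, then push the updated buffer-queue producer and
+ * response-queue consumer indices to the chip.
+ */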
+static int ql_tx_rx_clean(struct ql3_adapter *qdev,
+                         int *tx_cleaned, int *rx_cleaned, int work_to_do)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct net_rsp_iocb *net_rsp;
+       struct net_device *ndev = qdev->ndev;
+       unsigned long hw_flags;
+
+       /* While there are entries in the completion queue. */
+       while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
+               qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+
+               net_rsp = qdev->rsp_current;
+               switch (net_rsp->opcode) {
+
+               case OPCODE_OB_MAC_IOCB_FN0:
+               case OPCODE_OB_MAC_IOCB_FN2:
+                       ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+                                              net_rsp);
+                       (*tx_cleaned)++;
+                       break;
+
+               case OPCODE_IB_MAC_IOCB:
+                       ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+                                              net_rsp);
+                       (*rx_cleaned)++;
+                       break;
+
+               case OPCODE_IB_IP_IOCB:
+                       ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+                                                net_rsp);
+                       (*rx_cleaned)++;
+                       break;
+               default:
+                       {
+                               u32 *tmp = (u32 *) net_rsp;
+                               printk(KERN_ERR PFX
+                                      "%s: Hit default case, not "
+                                      "handled!\n"
+                                      "        dropping the packet, opcode = "
+                                      "%x.\n",
+                                      ndev->name, net_rsp->opcode);
+                               printk(KERN_ERR PFX
+                                      "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+                                      (unsigned long int)tmp[0],
+                                      (unsigned long int)tmp[1],
+                                      (unsigned long int)tmp[2],
+                                      (unsigned long int)tmp[3]);
+                       }
+               }
+
+               qdev->rsp_consumer_index++;
+
+               if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+                       qdev->rsp_consumer_index = 0;
+                       qdev->rsp_current = qdev->rsp_q_virt_addr;
+               } else {
+                       qdev->rsp_current++;
+               }
+       }
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       ql_update_lrg_bufq_prod_index(qdev);
+
+       if (qdev->small_buf_release_cnt >= 16) {
+               while (qdev->small_buf_release_cnt >= 16) {
+                       qdev->small_buf_q_producer_index++;
+
+                       if (qdev->small_buf_q_producer_index ==
+                           NUM_SBUFQ_ENTRIES)
+                               qdev->small_buf_q_producer_index = 0;
+                       qdev->small_buf_release_cnt -= 8;
+               }
+
+               ql_write_common_reg(qdev,
+                                   (u32 *) & port_regs->CommonRegs.
+                                   rxSmallQProducerIndex,
+                                   qdev->small_buf_q_producer_index);
+       }
+
+       ql_write_common_reg(qdev,
+                           (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
+                           qdev->rsp_consumer_index);
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       if (unlikely(netif_queue_stopped(qdev->ndev))) {
+               if (netif_queue_stopped(qdev->ndev) &&
+                   (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
+                       netif_wake_queue(qdev->ndev);
+       }
+
+       return *tx_cleaned + *rx_cleaned;
+}
+
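+/*
+ * NAPI poll routine.  Clean completions up to the allotted budget, then
+ * leave polling mode and re-enable interrupts once there is no more work
+ * or the interface/link is down.
+ */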
+static int ql_poll(struct net_device *ndev, int *budget)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       int work_to_do = min(*budget, ndev->quota);
+       int rx_cleaned = 0, tx_cleaned = 0;
+
+       if (!netif_carrier_ok(ndev))
+               goto quit_polling;
+
+       ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
+       *budget -= rx_cleaned;
+       ndev->quota -= rx_cleaned;
+
+       if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+quit_polling:
+               netif_rx_complete(ndev);
+               ql_enable_interrupts(qdev);
+               return 0;
+       }
+       return 1;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+
+       struct net_device *ndev = dev_id;
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       u32 value;
+       int handled = 1;
+       u32 var;
+
+       value =
+           ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+
+       if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+               spin_lock(&qdev->adapter_lock);
+               netif_stop_queue(qdev->ndev);
+               netif_carrier_off(qdev->ndev);
+               ql_disable_interrupts(qdev);
+               qdev->port_link_state = LS_DOWN;
+               set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
+
+               if (value & ISP_CONTROL_FE) {
+                       /*
+                        * Chip Fatal Error.
+                        */
+                       var =
+                           ql_read_page0_reg_l(qdev,
+                                             &port_regs->PortFatalErrStatus);
+                       printk(KERN_WARNING PFX
+                              "%s: Resetting chip. PortFatalErrStatus "
+                              "register = 0x%x\n", ndev->name, var);
+                       set_bit(QL_RESET_START,&qdev->flags) ;
+               } else {
+                       /*
+                        * Soft Reset Requested.
+                        */
+                       set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
+                       printk(KERN_ERR PFX
+                              "%s: Another function issued a reset to the "
+                              "chip. ISR value = %x.\n", ndev->name, value);
+               }
+               queue_work(qdev->workqueue, &qdev->reset_work);
+               spin_unlock(&qdev->adapter_lock);
+       } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+               ql_disable_interrupts(qdev);
+               if (likely(netif_rx_schedule_prep(ndev)))
+                       __netif_rx_schedule(ndev);
+               else
+                       ql_enable_interrupts(qdev);
+       } else {
+               return IRQ_NONE;
+       }
+
+       return IRQ_RETVAL(handled);
+}
+
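+/*
+ * Transmit path.  The frame is mapped as a single DMA buffer and
+ * described by one request queue entry (no scatter/gather), then the
+ * request queue producer index is pushed to the chip.
+ */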
+static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql_tx_buf_cb *tx_cb;
+       struct ob_mac_iocb_req *mac_iocb_ptr;
+       u64 map;
+
+       if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+               if (!netif_queue_stopped(ndev))
+                       netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+       tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+       mac_iocb_ptr = tx_cb->queue_entry;
+       memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+       mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+       mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+       mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+       mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+       tx_cb->skb = skb;
+       map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+       mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
+       mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
+       mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
+       pci_unmap_addr_set(tx_cb, mapaddr, map);
+       pci_unmap_len_set(tx_cb, maplen, skb->len);
+       atomic_dec(&qdev->tx_count);
+
+       qdev->req_producer_index++;
+       if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+               qdev->req_producer_index = 0;
+       wmb();
+       ql_write_common_reg_l(qdev,
+                           (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
+                           qdev->req_producer_index);
+
+       ndev->trans_start = jiffies;
+       if (netif_msg_tx_queued(qdev))
+               printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
+                      ndev->name, qdev->req_producer_index, skb->len);
+
+       return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+       qdev->req_q_size =
+           (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+       qdev->req_q_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                (size_t) qdev->req_q_size,
+                                &qdev->req_q_phy_addr);
+
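+       /*
+        * Bail out if the allocation failed or the queue is not aligned
+        * to its size.
+        */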
+       if ((qdev->req_q_virt_addr == NULL) ||
+           LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
+               printk(KERN_ERR PFX "%s: reqQ failed.\n",
+                      qdev->ndev->name);
+               return -ENOMEM;
+       }
+
+       qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+       qdev->rsp_q_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                (size_t) qdev->rsp_q_size,
+                                &qdev->rsp_q_phy_addr);
+
+       if ((qdev->rsp_q_virt_addr == NULL) ||
+           LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
+               printk(KERN_ERR PFX
+                      "%s: rspQ allocation failed\n",
+                      qdev->ndev->name);
+               pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
+                                   qdev->req_q_virt_addr,
+                                   qdev->req_q_phy_addr);
+               return -ENOMEM;
+       }
+
+       set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+
+       return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
+               printk(KERN_INFO PFX
+                      "%s: Already done.\n", qdev->ndev->name);
+               return;
+       }
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->req_q_size,
+                           qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+       qdev->req_q_virt_addr = NULL;
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->rsp_q_size,
+                           qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+       qdev->rsp_q_virt_addr = NULL;
+
+       clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+       /* Create Large Buffer Queue */
+       qdev->lrg_buf_q_size =
+           NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+       if (qdev->lrg_buf_q_size < PAGE_SIZE)
+               qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+       else
+               qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+       qdev->lrg_buf_q_alloc_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                qdev->lrg_buf_q_alloc_size,
+                                &qdev->lrg_buf_q_alloc_phy_addr);
+
+       if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+               printk(KERN_ERR PFX
+                      "%s: lBufQ failed\n", qdev->ndev->name);
+               return -ENOMEM;
+       }
+       qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+       qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+       /* Create Small Buffer Queue */
+       qdev->small_buf_q_size =
+           NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+       if (qdev->small_buf_q_size < PAGE_SIZE)
+               qdev->small_buf_q_alloc_size = PAGE_SIZE;
+       else
+               qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+       qdev->small_buf_q_alloc_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                qdev->small_buf_q_alloc_size,
+                                &qdev->small_buf_q_alloc_phy_addr);
+
+       if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+               printk(KERN_ERR PFX
+                      "%s: Small Buffer Queue allocation failed.\n",
+                      qdev->ndev->name);
+               pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+                                   qdev->lrg_buf_q_alloc_virt_addr,
+                                   qdev->lrg_buf_q_alloc_phy_addr);
+               return -ENOMEM;
+       }
+
+       qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+       qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+       set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+       return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
+               printk(KERN_INFO PFX
+                      "%s: Already done.\n", qdev->ndev->name);
+               return;
+       }
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->lrg_buf_q_alloc_size,
+                           qdev->lrg_buf_q_alloc_virt_addr,
+                           qdev->lrg_buf_q_alloc_phy_addr);
+
+       qdev->lrg_buf_q_virt_addr = NULL;
+
+       pci_free_consistent(qdev->pdev,
+                           qdev->small_buf_q_alloc_size,
+                           qdev->small_buf_q_alloc_virt_addr,
+                           qdev->small_buf_q_alloc_phy_addr);
+
+       qdev->small_buf_q_virt_addr = NULL;
+
+       clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+}
+
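+/*
+ * Small receive buffers come from a single coherent allocation that is
+ * carved into QL_SMALL_BUFFER_SIZE slices; each small buffer queue entry
+ * is pointed at one slice.
+ */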
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct bufq_addr_element *small_buf_q_entry;
+
+       /*
+        * Currently we make one coherent allocation and use it for all of
+        * the small buffers.
+        */
+       qdev->small_buf_total_size =
+           (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+            QL_SMALL_BUFFER_SIZE);
+
+       qdev->small_buf_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                qdev->small_buf_total_size,
+                                &qdev->small_buf_phy_addr);
+
+       if (qdev->small_buf_virt_addr == NULL) {
+               printk(KERN_ERR PFX
+                      "%s: Failed to get small buffer memory.\n",
+                      qdev->ndev->name);
+               return -ENOMEM;
+       }
+
+       qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+       qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+       small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+       qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
+
+       /* Initialize the small buffer queue. */
+       for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+               small_buf_q_entry->addr_high =
+                   cpu_to_le32(qdev->small_buf_phy_addr_high);
+               small_buf_q_entry->addr_low =
+                   cpu_to_le32(qdev->small_buf_phy_addr_low +
+                               (i * QL_SMALL_BUFFER_SIZE));
+               small_buf_q_entry++;
+       }
+       qdev->small_buf_index = 0;
+       set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
+       return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+       if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
+               printk(KERN_INFO PFX
+                      "%s: Already done.\n", qdev->ndev->name);
+               return;
+       }
+       if (qdev->small_buf_virt_addr != NULL) {
+               pci_free_consistent(qdev->pdev,
+                                   qdev->small_buf_total_size,
+                                   qdev->small_buf_virt_addr,
+                                   qdev->small_buf_phy_addr);
+
+               qdev->small_buf_virt_addr = NULL;
+       }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+       int i = 0;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+
+       for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+               lrg_buf_cb = &qdev->lrg_buf[i];
+               if (lrg_buf_cb->skb) {
+                       dev_kfree_skb(lrg_buf_cb->skb);
+                       pci_unmap_single(qdev->pdev,
+                                        pci_unmap_addr(lrg_buf_cb, mapaddr),
+                                        pci_unmap_len(lrg_buf_cb, maplen),
+                                        PCI_DMA_FROMDEVICE);
+                       memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+               } else {
+                       break;
+               }
+       }
+}
+
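+/*
+ * Fill the large buffer queue elements with the bus addresses recorded
+ * for each receive skb by ql_alloc_large_buffers() below.
+ */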
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+       for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+               lrg_buf_cb = &qdev->lrg_buf[i];
+               buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+               buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+               buf_addr_ele++;
+       }
+       qdev->lrg_buf_index = 0;
+       qdev->lrg_buf_skb_check = 0;
+}
+
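+/*
+ * Allocate one skb per large buffer, reserve QL_HEADER_SPACE for the
+ * copied ethernet header, map the data area for DMA and record the
+ * mapping in the corresponding control block.
+ */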
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+       int i;
+       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct sk_buff *skb;
+       u64 map;
+
+       for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+               skb = dev_alloc_skb(qdev->lrg_buffer_len);
+               if (unlikely(!skb)) {
+                       /* Better luck next round */
+                       printk(KERN_ERR PFX
+                              "%s: large buffer allocation failed "
+                              "for %d bytes at index %d.\n",
+                              qdev->ndev->name,
+                              qdev->lrg_buffer_len, i);
+                       ql_free_large_buffers(qdev);
+                       return -ENOMEM;
+               } else {
+
+                       lrg_buf_cb = &qdev->lrg_buf[i];
+                       memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+                       lrg_buf_cb->index = i;
+                       lrg_buf_cb->skb = skb;
+                       /*
+                        * We save some space to copy the ethhdr from first
+                        * buffer
+                        */
+                       skb_reserve(skb, QL_HEADER_SPACE);
+                       map = pci_map_single(qdev->pdev,
+                                            skb->data,
+                                            qdev->lrg_buffer_len -
+                                            QL_HEADER_SPACE,
+                                            PCI_DMA_FROMDEVICE);
+                       pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                       pci_unmap_len_set(lrg_buf_cb, maplen,
+                                         qdev->lrg_buffer_len -
+                                         QL_HEADER_SPACE);
+                       lrg_buf_cb->buf_phy_addr_low =
+                           cpu_to_le32(LS_64BITS(map));
+                       lrg_buf_cb->buf_phy_addr_high =
+                           cpu_to_le32(MS_64BITS(map));
+               }
+       }
+       return 0;
+}
+
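+/*
+ * Build the transmit free list: each tx control block is pointed at its
+ * ob_mac_iocb_req slot in the request ring.
+ */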
+static void ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+       struct ql_tx_buf_cb *tx_cb;
+       int i;
+       struct ob_mac_iocb_req *req_q_curr =
+                                       qdev->req_q_virt_addr;
+
+       /* Create free list of transmit buffers */
+       for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+               tx_cb = &qdev->tx_buf[i];
+               tx_cb->skb = NULL;
+               tx_cb->queue_entry = req_q_curr;
+               req_q_curr++;
+       }
+}
+
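+/*
+ * Top-level memory setup: shadow register page, request/response rings,
+ * buffer queues, then the small and large buffers themselves.  The err_*
+ * labels unwind whatever has been allocated when a later step fails.
+ */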
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+       if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
+               qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+       else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+               qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+       } else {
+               printk(KERN_ERR PFX
+                      "%s: Invalid MTU size. Only 1500 and 9000 are accepted.\n",
+                      qdev->ndev->name);
+               return -ENOMEM;
+       }
+       qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+       qdev->max_frame_size =
+           (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+       /*
+        * First allocate a page of shared memory and use it for shadow
+        * locations of Network Request Queue Consumer Address Register and
+        * Network Completion Queue Producer Index Register
+        */
+       qdev->shadow_reg_virt_addr =
+           pci_alloc_consistent(qdev->pdev,
+                                PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+
+       if (qdev->shadow_reg_virt_addr != NULL) {
+               qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
+               qdev->req_consumer_index_phy_addr_high =
+                   MS_64BITS(qdev->shadow_reg_phy_addr);
+               qdev->req_consumer_index_phy_addr_low =
+                   LS_64BITS(qdev->shadow_reg_phy_addr);
+
+               qdev->prsp_producer_index =
+                   (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+               qdev->rsp_producer_index_phy_addr_high =
+                   qdev->req_consumer_index_phy_addr_high;
+               qdev->rsp_producer_index_phy_addr_low =
+                   qdev->req_consumer_index_phy_addr_low + 8;
+       } else {
+               printk(KERN_ERR PFX
+                      "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
+               return -ENOMEM;
+       }
+
+       if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+               printk(KERN_ERR PFX
+                      "%s: ql_alloc_net_req_rsp_queues failed.\n",
+                      qdev->ndev->name);
+               goto err_req_rsp;
+       }
+
+       if (ql_alloc_buffer_queues(qdev) != 0) {
+               printk(KERN_ERR PFX
+                      "%s: ql_alloc_buffer_queues failed.\n",
+                      qdev->ndev->name);
+               goto err_buffer_queues;
+       }
+
+       if (ql_alloc_small_buffers(qdev) != 0) {
+               printk(KERN_ERR PFX
+                      "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
+               goto err_small_buffers;
+       }
+
+       if (ql_alloc_large_buffers(qdev) != 0) {
+               printk(KERN_ERR PFX
+                      "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
+               goto err_large_buffers;
+       }
+
+       /* Initialize the large buffer queue. */
+       ql_init_large_buffers(qdev);
+       ql_create_send_free_list(qdev);
+
+       qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+       return 0;
+
+err_large_buffers:
+       ql_free_small_buffers(qdev);
+err_small_buffers:
+       ql_free_buffer_queues(qdev);
+err_buffer_queues:
+       ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+       pci_free_consistent(qdev->pdev,
+                           PAGE_SIZE,
+                           qdev->shadow_reg_virt_addr,
+                           qdev->shadow_reg_phy_addr);
+
+       return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+       ql_free_large_buffers(qdev);
+       ql_free_small_buffers(qdev);
+       ql_free_buffer_queues(qdev);
+       ql_free_net_req_rsp_queues(qdev);
+       if (qdev->shadow_reg_virt_addr != NULL) {
+               pci_free_consistent(qdev->pdev,
+                                   PAGE_SIZE,
+                                   qdev->shadow_reg_virt_addr,
+                                   qdev->shadow_reg_phy_addr);
+               qdev->shadow_reg_virt_addr = NULL;
+       }
+}
+
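+/*
+ * Program the NVRAM-derived buflet and hash table parameters into the
+ * chip's local RAM registers, under the DDR RAM semaphore.
+ */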
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_local_ram_registers *local_ram =
+           (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
+
+       if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 4))
+               return -1;
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxBufletCount,
+                          qdev->nvram_data.bufletCount);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->freeBufletThresholdLow,
+                          (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+                          (qdev->nvram_data.tcpWindowThreshold0));
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->freeBufletThresholdHigh,
+                          qdev->nvram_data.tcpWindowThreshold50);
+
+       ql_write_page2_reg(qdev,
+                          &local_ram->ipHashTableBase,
+                          (qdev->nvram_data.ipHashTableBaseHi << 16) |
+                          qdev->nvram_data.ipHashTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->ipHashTableCount,
+                          qdev->nvram_data.ipHashTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->tcpHashTableBase,
+                          (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+                          qdev->nvram_data.tcpHashTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->tcpHashTableCount,
+                          qdev->nvram_data.tcpHashTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->ncbBase,
+                          (qdev->nvram_data.ncbTableBaseHi << 16) |
+                          qdev->nvram_data.ncbTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxNcbCount,
+                          qdev->nvram_data.ncbTableSize);
+       ql_write_page2_reg(qdev,
+                          &local_ram->drbBase,
+                          (qdev->nvram_data.drbTableBaseHi << 16) |
+                          qdev->nvram_data.drbTableBaseLo);
+       ql_write_page2_reg(qdev,
+                          &local_ram->maxDrbCount,
+                          qdev->nvram_data.drbTableSize);
+       ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+       return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+       u32 value;
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+           (struct ql3xxx_host_memory_registers *)port_regs;
+       u32 delay = 10;
+       int status = 0;
+
+       if(ql_mii_setup(qdev))
+               return -1;
+
+       /* Bring the PHY out of reset */
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           (ISP_SERIAL_PORT_IF_WE |
+                            (ISP_SERIAL_PORT_IF_WE << 16)));
+
+       qdev->port_link_state = LS_DOWN;
+       netif_carrier_off(qdev->ndev);
+
+       /* V2 chip fix for ARS-39168. */
+       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+                           (ISP_SERIAL_PORT_IF_SDE |
+                            (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+       /* Request Queue Registers */
+       *((u32 *) (qdev->preq_consumer_index)) = 0;
+       atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
+       qdev->req_producer_index = 0;
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqConsumerIndexAddrHigh,
+                          qdev->req_consumer_index_phy_addr_high);
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqConsumerIndexAddrLow,
+                          qdev->req_consumer_index_phy_addr_low);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqBaseAddrHigh,
+                          MS_64BITS(qdev->req_q_phy_addr));
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->reqBaseAddrLow,
+                          LS_64BITS(qdev->req_q_phy_addr));
+       ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+       /* Response Queue Registers */
+       *((u16 *) (qdev->prsp_producer_index)) = 0;
+       qdev->rsp_consumer_index = 0;
+       qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspProducerIndexAddrHigh,
+                          qdev->rsp_producer_index_phy_addr_high);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspProducerIndexAddrLow,
+                          qdev->rsp_producer_index_phy_addr_low);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspBaseAddrHigh,
+                          MS_64BITS(qdev->rsp_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rspBaseAddrLow,
+                          LS_64BITS(qdev->rsp_q_phy_addr));
+
+       ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+       /* Large Buffer Queue */
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQBaseAddrHigh,
+                          MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQBaseAddrLow,
+                          LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeBufferLength,
+                          qdev->lrg_buffer_len);
+
+       /* Small Buffer Queue */
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallQBaseAddrHigh,
+                          MS_64BITS(qdev->small_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallQBaseAddrLow,
+                          LS_64BITS(qdev->small_buf_q_phy_addr));
+
+       ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxSmallBufferLength,
+                          QL_SMALL_BUFFER_SIZE);
+
+       qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+       qdev->small_buf_release_cnt = 8;
+       qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
+       qdev->lrg_buf_release_cnt = 8;
+       qdev->lrg_buf_next_free =
+           (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
+       qdev->small_buf_index = 0;
+       qdev->lrg_buf_index = 0;
+       qdev->lrg_buf_free_count = 0;
+       qdev->lrg_buf_free_head = NULL;
+       qdev->lrg_buf_free_tail = NULL;
+
+       ql_write_common_reg(qdev,
+                           (u32 *) & port_regs->CommonRegs.
+                           rxSmallQProducerIndex,
+                           qdev->small_buf_q_producer_index);
+       ql_write_common_reg(qdev,
+                           (u32 *) & port_regs->CommonRegs.
+                           rxLargeQProducerIndex,
+                           qdev->lrg_buf_q_producer_index);
+
+       /*
+        * Find out if the chip has already been initialized.  If it has, then
+        * we skip some of the initialization.
+        */
+       clear_bit(QL_LINK_MASTER, &qdev->flags);
+       value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       if ((value & PORT_STATUS_IC) == 0) {
+
+               /* Chip has not been configured yet, so let it rip. */
+               if(ql_init_misc_registers(qdev)) {
+                       status = -1;
+                       goto out;
+               }
+
+               if (qdev->mac_index)
+                       ql_write_page0_reg(qdev,
+                                          &port_regs->mac1MaxFrameLengthReg,
+                                          qdev->max_frame_size);
+               else
+                       ql_write_page0_reg(qdev,
+                                          &port_regs->mac0MaxFrameLengthReg,
+                                          qdev->max_frame_size);
+
+               value = qdev->nvram_data.tcpMaxWindowSize;
+               ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+               value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+               if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+                               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+                                * 2) << 13)) {
+                       status = -1;
+                       goto out;
+               }
+               ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+               ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+                                  (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+                                    16) | (INTERNAL_CHIP_SD |
+                                           INTERNAL_CHIP_WE)));
+               ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+       }
+
+
+       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                       (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+                        2) << 7)) {
+               status = -1;
+               goto out;
+       }
+
+       ql_init_scan_mode(qdev);
+       ql_get_phy_owner(qdev);
+
+       /* Load the MAC Configuration */
+
+       /* Program lower 32 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((qdev->ndev->dev_addr[2] << 24)
+                           | (qdev->ndev->dev_addr[3] << 16)
+                           | (qdev->ndev->dev_addr[4] << 8)
+                           | qdev->ndev->dev_addr[5]));
+
+       /* Program top 16 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((qdev->ndev->dev_addr[0] << 8)
+                           | qdev->ndev->dev_addr[1]));
+
+       /* Enable Primary MAC */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+                           MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+       /* Clear Primary and Secondary IP addresses */
+       ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+                          ((IP_ADDR_INDEX_REG_MASK << 16) |
+                           (qdev->mac_index << 2)));
+       ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+       ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+                          ((IP_ADDR_INDEX_REG_MASK << 16) |
+                           ((qdev->mac_index << 2) + 1)));
+       ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+       ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+       /* Indicate Configuration Complete */
+       ql_write_page0_reg(qdev,
+                          &port_regs->portControl,
+                          ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
+       do {
+               value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+               if (value & PORT_STATUS_IC)
+                       break;
+               msleep(500);
+       } while (--delay);
+
+       if (delay == 0) {
+               printk(KERN_ERR PFX
+                      "%s: Hw Initialization timeout.\n", qdev->ndev->name);
+               status = -1;
+               goto out;
+       }
+
+       /* Enable Ethernet Function */
+       value =
+           (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+            PORT_CONTROL_HH);
+       ql_write_page0_reg(qdev, &port_regs->portControl,
+                          ((value << 16) | value));
+
+out:
+       return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       int status = 0;
+       u16 value;
+       int max_wait_time;
+
+       set_bit(QL_RESET_ACTIVE, &qdev->flags);
+       clear_bit(QL_RESET_DONE, &qdev->flags);
+
+       /*
+        * Issue soft reset to chip.
+        */
+       printk(KERN_DEBUG PFX
+              "%s: Issue soft reset to chip.\n",
+              qdev->ndev->name);
+       ql_write_common_reg(qdev,
+                           (u32 *) & port_regs->CommonRegs.ispControlStatus,
+                           ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+       /* Wait up to 5 seconds for the reset to complete. */
+       printk(KERN_DEBUG PFX
+              "%s: Waiting up to 5 seconds for reset to complete.\n",
+              qdev->ndev->name);
+
+       /* Wait until the firmware tells us the Soft Reset is done */
+       max_wait_time = 5;
+       do {
+               value =
+                   ql_read_common_reg(qdev,
+                                      &port_regs->CommonRegs.ispControlStatus);
+               if ((value & ISP_CONTROL_SR) == 0)
+                       break;
+
+               ssleep(1);
+       } while ((--max_wait_time));
+
+       /*
+        * Also, make sure that the Network Reset Interrupt bit has been
+        * cleared after the soft reset has taken place.
+        */
+       value =
+           ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+       if (value & ISP_CONTROL_RI) {
+               printk(KERN_DEBUG PFX
+                      "ql_adapter_reset: clearing RI after reset.\n");
+               ql_write_common_reg(qdev,
+                                   (u32 *) & port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+       }
+
+       if (max_wait_time == 0) {
+               /* Issue Force Soft Reset */
+               ql_write_common_reg(qdev,
+                                   (u32 *) & port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_FSR << 16) |
+                                    ISP_CONTROL_FSR));
+               /*
+                * Wait until the firmware tells us the Force Soft Reset is
+                * done
+                */
+               max_wait_time = 5;
+               do {
+                       value =
+                           ql_read_common_reg(qdev,
+                                              &port_regs->CommonRegs.
+                                              ispControlStatus);
+                       if ((value & ISP_CONTROL_FSR) == 0) {
+                               break;
+                       }
+                       ssleep(1);
+               } while ((--max_wait_time));
+       }
+       if (max_wait_time == 0)
+               status = 1;
+
+       clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+       set_bit(QL_RESET_DONE, &qdev->flags);
+       return status;
+}
+
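+/*
+ * Decode ispControlStatus to determine whether this PCI function is
+ * network function 0 or 1, then select the per-port IOCB opcodes, PHY
+ * address and optical/copper media type accordingly.
+ */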
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       u32 value, port_status;
+       u8 func_number;
+
+       /* Get the function number */
+       value =
+           ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+       func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+       port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+       switch (value & ISP_CONTROL_FN_MASK) {
+       case ISP_CONTROL_FN0_NET:
+               qdev->mac_index = 0;
+               qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+               qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+               qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+               qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+               qdev->PHYAddr = PORT0_PHY_ADDRESS;
+               if (port_status & PORT_STATUS_SM0)
+                       set_bit(QL_LINK_OPTICAL,&qdev->flags);
+               else
+                       clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+               break;
+
+       case ISP_CONTROL_FN1_NET:
+               qdev->mac_index = 1;
+               qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+               qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
+               qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
+               qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+               qdev->PHYAddr = PORT1_PHY_ADDRESS;
+               if (port_status & PORT_STATUS_SM1)
+                       set_bit(QL_LINK_OPTICAL,&qdev->flags);
+               else
+                       clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+               break;
+
+       case ISP_CONTROL_FN0_SCSI:
+       case ISP_CONTROL_FN1_SCSI:
+       default:
+               printk(KERN_DEBUG PFX
+                      "%s: Invalid function number, ispControlStatus = 0x%x\n",
+                      qdev->ndev->name,value);
+               break;
+       }
+       qdev->numPorts = qdev->nvram_data.numPorts;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct pci_dev *pdev = qdev->pdev;
+
+       printk(KERN_INFO PFX
+              "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
+              DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+       printk(KERN_INFO PFX
+              "%s Interface.\n",
+              test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
+
+       /*
+        * Print PCI bus width/type.
+        */
+       printk(KERN_INFO PFX
+              "Bus interface is %s %s.\n",
+              ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+              ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+       printk(KERN_INFO PFX
+              "mem IO base address adjusted = 0x%p\n",
+              qdev->mem_map_registers);
+       printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
+
+       if (netif_msg_probe(qdev))
+               printk(KERN_INFO PFX
+                      "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
+                      ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
+                      ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
+                      ndev->dev_addr[5]);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+       struct net_device *ndev = qdev->ndev;
+       int retval = 0;
+
+       netif_stop_queue(ndev);
+       netif_carrier_off(ndev);
+
+       clear_bit(QL_ADAPTER_UP,&qdev->flags);
+       clear_bit(QL_LINK_MASTER,&qdev->flags);
+
+       ql_disable_interrupts(qdev);
+
+       free_irq(qdev->pdev->irq, ndev);
+
+       if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
+               printk(KERN_INFO PFX
+                      "%s: calling pci_disable_msi().\n", qdev->ndev->name);
+               clear_bit(QL_MSI_ENABLED,&qdev->flags);
+               pci_disable_msi(qdev->pdev);
+       }
+
+       del_timer_sync(&qdev->adapter_timer);
+
+       netif_poll_disable(ndev);
+
+       if (do_reset) {
+               int soft_reset;
+               unsigned long hw_flags;
+
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               if (ql_wait_for_drvr_lock(qdev)) {
+                       if ((soft_reset = ql_adapter_reset(qdev))) {
+                               printk(KERN_ERR PFX
+                                      "%s: ql_adapter_reset(%d) FAILED!\n",
+                                      ndev->name, qdev->index);
+                       }
+                       printk(KERN_ERR PFX
+                               "%s: Releasing driver lock via chip reset.\n", ndev->name);
+               } else {
+                       printk(KERN_ERR PFX
+                              "%s: Could not acquire driver lock to do "
+                              "reset!\n", ndev->name);
+                       retval = -1;
+               }
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+       }
+       ql_free_mem_resources(qdev);
+       return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+       struct net_device *ndev = qdev->ndev;
+       int err;
+       unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
+       unsigned long hw_flags;
+
+       if (ql_alloc_mem_resources(qdev)) {
+               printk(KERN_ERR PFX
+                      "%s: Unable to allocate buffers.\n", ndev->name);
+               return -ENOMEM;
+       }
+
+       if (qdev->msi) {
+               if (pci_enable_msi(qdev->pdev)) {
+                       printk(KERN_ERR PFX
+                              "%s: User requested MSI, but MSI failed to "
+                              "initialize.  Continuing without MSI.\n",
+                              qdev->ndev->name);
+                       qdev->msi = 0;
+               } else {
+                       printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
+                       set_bit(QL_MSI_ENABLED,&qdev->flags);
+                       irq_flags &= ~SA_SHIRQ;
+               }
+       }
+
+       if ((err = request_irq(qdev->pdev->irq,
+                              ql3xxx_isr,
+                              irq_flags, ndev->name, ndev))) {
+               printk(KERN_ERR PFX
+                      "%s: Failed to reserve interrupt %d - already in use.\n",
+                      ndev->name, qdev->pdev->irq);
+               goto err_irq;
+       }
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+       if ((err = ql_wait_for_drvr_lock(qdev))) {
+               if ((err = ql_adapter_initialize(qdev))) {
+                       printk(KERN_ERR PFX
+                              "%s: Unable to initialize adapter.\n",
+                              ndev->name);
+                       goto err_init;
+               }
+               printk(KERN_ERR PFX
+                               "%s: Releasing driver lock.\n", ndev->name);
+               ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+       } else {
+               printk(KERN_ERR PFX
+                      "%s: Could not acquire driver lock.\n",
+                      ndev->name);
+               goto err_lock;
+       }
+
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       set_bit(QL_ADAPTER_UP,&qdev->flags);
+
+       mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+       netif_poll_enable(ndev);
+       ql_enable_interrupts(qdev);
+       return 0;
+
+err_init:
+       ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+       free_irq(qdev->pdev->irq, ndev);
+err_irq:
+       if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
+               printk(KERN_INFO PFX
+                      "%s: calling pci_disable_msi().\n",
+                      qdev->ndev->name);
+               clear_bit(QL_MSI_ENABLED,&qdev->flags);
+               pci_disable_msi(qdev->pdev);
+       }
+       return err;
+}
+
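+/*
+ * Bounce the interface: bring the adapter down (optionally with a chip
+ * reset) and back up again.  Used by the MTU change, reset and tx-timeout
+ * paths; the device is closed if the cycle fails.
+ */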
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+       if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
+               printk(KERN_ERR PFX
+                               "%s: Driver up/down cycle failed, "
+                               "closing device\n",qdev->ndev->name);
+               dev_close(qdev->ndev);
+               return -1;
+       }
+       return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       /*
+        * Wait for device to recover from a reset.
+        * (Rarely happens, but possible.)
+        */
+       while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
+               msleep(50);
+
+       ql_adapter_down(qdev,QL_DO_RESET);
+       return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       return (ql_adapter_up(qdev));
+}
+
+static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
+{
+       struct ql3_adapter *qdev = netdev_priv(dev);
+       return &qdev->stats;
+}
+
+static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+       printk(KERN_ERR PFX "%s:  new mtu size = %d.\n", ndev->name, new_mtu);
+       if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
+               printk(KERN_ERR PFX
+                      "%s: mtu size of %d is not valid.  Use exactly %d or "
+                      "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
+                      JUMBO_MTU_SIZE);
+               return -EINVAL;
+       }
+
+       if (!netif_running(ndev)) {
+               ndev->mtu = new_mtu;
+               return 0;
+       }
+
+       ndev->mtu = new_mtu;
+       return ql_cycle_adapter(qdev,QL_DO_RESET);
+}
+
+static void ql3xxx_set_multicast_list(struct net_device *ndev)
+{
+       /*
+        * Multicast is not supported (IFF_MULTICAST is cleared at probe
+        * time), so there is nothing to program here.
+        */
+       return;
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
+       struct sockaddr *addr = p;
+       unsigned long hw_flags;
+
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+       spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+       /* Program lower 32 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((ndev->dev_addr[2] << 24) | (ndev->
+                                                        dev_addr[3] << 16) |
+                           (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+       /* Program top 16 bits of the MAC address */
+       ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+                          ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+       ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+                          ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+       spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+       return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+
+       printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+       /*
+        * Stop the queues, we've got a problem.
+        */
+       netif_stop_queue(ndev);
+
+       /*
+        * Wake up the worker to process this event.
+        */
+       queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+}
+
+static void ql_reset_work(struct ql3_adapter *qdev)
+{
+       struct net_device *ndev = qdev->ndev;
+       u32 value;
+       struct ql_tx_buf_cb *tx_cb;
+       int max_wait_time, i;
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       unsigned long hw_flags;
+
+       if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+           test_bit(QL_RESET_START, &qdev->flags)) {
+               clear_bit(QL_LINK_MASTER,&qdev->flags);
+
+               /*
+                * Loop through the active list and return the skb.
+                */
+               for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+                       tx_cb = &qdev->tx_buf[i];
+                       if (tx_cb->skb) {
+
+                               printk(KERN_DEBUG PFX
+                                      "%s: Freeing lost SKB.\n",
+                                      qdev->ndev->name);
+                               pci_unmap_single(qdev->pdev,
+                                       pci_unmap_addr(tx_cb, mapaddr),
+                                       pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+                               dev_kfree_skb(tx_cb->skb);
+                               tx_cb->skb = NULL;
+                       }
+               }
+
+               printk(KERN_ERR PFX
+                      "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               ql_write_common_reg(qdev,
+                                   &port_regs->CommonRegs.
+                                   ispControlStatus,
+                                   ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+               /*
+                * Wait for the Soft Reset to complete.
+                */
+               max_wait_time = 10;
+               do {
+                       value = ql_read_common_reg(qdev,
+                                                  &port_regs->CommonRegs.
+                                                  ispControlStatus);
+                       if ((value & ISP_CONTROL_SR) == 0) {
+                               printk(KERN_DEBUG PFX
+                                      "%s: reset completed.\n",
+                                      qdev->ndev->name);
+                               break;
+                       }
+
+                       if (value & ISP_CONTROL_RI) {
+                               printk(KERN_DEBUG PFX
+                                      "%s: clearing NRI after reset.\n",
+                                      qdev->ndev->name);
+                               ql_write_common_reg(qdev,
+                                                   (u32 *) &
+                                                   port_regs->
+                                                   CommonRegs.
+                                                   ispControlStatus,
+                                                   ((ISP_CONTROL_RI <<
+                                                     16) | ISP_CONTROL_RI));
+                       }
+
+                       ssleep(1);
+               } while (--max_wait_time);
+               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+               if (value & ISP_CONTROL_SR) {
+
+                       /*
+                        * The soft reset timed out; clear the reset flags
+                        * and cycle the adapter with a full reset.  Nothing
+                        * else to do...
+                        */
+                       printk(KERN_ERR PFX
+                              "%s: Timed out waiting for reset to "
+                              "complete.\n", ndev->name);
+                       printk(KERN_ERR PFX
+                              "%s: Do a reset.\n", ndev->name);
+                       clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
+                       clear_bit(QL_RESET_START,&qdev->flags);
+                       ql_cycle_adapter(qdev,QL_DO_RESET);
+                       return;
+               }
+
+               clear_bit(QL_RESET_ACTIVE,&qdev->flags);
+               clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
+               clear_bit(QL_RESET_START,&qdev->flags);
+               ql_cycle_adapter(qdev,QL_NO_RESET);
+       }
+}
+
+static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+{
+       ql_cycle_adapter(qdev,QL_DO_RESET);
+}
+
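+/*
+ * Record chip revision, PCI bus width (32/64-bit), PCI vs. PCI-X mode and
+ * the PCI slot number from the port status register.
+ */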
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       u32 value;
+
+       value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+       qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+       if (value & PORT_STATUS_64)
+               qdev->pci_width = 64;
+       else
+               qdev->pci_width = 32;
+       if (value & PORT_STATUS_X)
+               qdev->pci_x = 1;
+       else
+               qdev->pci_x = 0;
+       qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
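+/*
+ * Adapter timer: run the link state machine, rescheduled every second
+ * unless a reset is in progress.
+ */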
+static void ql3xxx_timer(unsigned long ptr)
+{
+       struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+
+       if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
+               printk(KERN_DEBUG PFX
+                      "%s: Reset in progress.\n",
+                      qdev->ndev->name);
+               goto end;
+       }
+
+       ql_link_state_machine(qdev);
+
+       /* Restart timer on a one second interval. */
+end:
+       mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+static int __devinit ql3xxx_probe(struct pci_dev *pdev,
+                                 const struct pci_device_id *pci_entry)
+{
+       struct net_device *ndev = NULL;
+       struct ql3_adapter *qdev = NULL;
+       static int cards_found = 0;
+       int pci_using_dac, err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               printk(KERN_ERR PFX "%s cannot enable PCI device\n",
+                      pci_name(pdev));
+               goto err_out;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
+                      pci_name(pdev));
+               goto err_out_disable_pdev;
+       }
+
+       pci_set_master(pdev);
+
+       if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+               pci_using_dac = 1;
+               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+       } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+               pci_using_dac = 0;
+               err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+       }
+
+       if (err) {
+               printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+                      pci_name(pdev));
+               goto err_out_free_regions;
+       }
+
+       ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+       if (!ndev)
+               goto err_out_free_regions;
+
+       SET_MODULE_OWNER(ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       ndev->features = NETIF_F_LLTX;
+       if (pci_using_dac)
+               ndev->features |= NETIF_F_HIGHDMA;
+
+       pci_set_drvdata(pdev, ndev);
+
+       qdev = netdev_priv(ndev);
+       qdev->index = cards_found;
+       qdev->ndev = ndev;
+       qdev->pdev = pdev;
+       qdev->port_link_state = LS_DOWN;
+       if (msi)
+               qdev->msi = 1;
+
+       qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+       qdev->mem_map_registers =
+           ioremap_nocache(pci_resource_start(pdev, 1),
+                           pci_resource_len(qdev->pdev, 1));
+       if (!qdev->mem_map_registers) {
+               printk(KERN_ERR PFX "%s: cannot map device registers\n",
+                      pci_name(pdev));
+               goto err_out_free_ndev;
+       }
+
+       spin_lock_init(&qdev->adapter_lock);
+       spin_lock_init(&qdev->hw_lock);
+
+       /* Set driver entry points */
+       ndev->open = ql3xxx_open;
+       ndev->hard_start_xmit = ql3xxx_send;
+       ndev->stop = ql3xxx_close;
+       ndev->get_stats = ql3xxx_get_stats;
+       ndev->change_mtu = ql3xxx_change_mtu;
+       ndev->set_multicast_list = ql3xxx_set_multicast_list;
+       SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+       ndev->set_mac_address = ql3xxx_set_mac_address;
+       ndev->tx_timeout = ql3xxx_tx_timeout;
+       ndev->watchdog_timeo = 5 * HZ;
+
+       ndev->poll = &ql_poll;
+       ndev->weight = 64;
+
+       ndev->irq = pdev->irq;
+
+       /* make sure the EEPROM is good */
+       if (ql_get_nvram_params(qdev)) {
+               printk(KERN_ALERT PFX
+                      "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
+                      qdev->index);
+               goto err_out_iounmap;
+       }
+
+       ql_set_mac_info(qdev);
+
+       /* Validate and set parameters */
+       if (qdev->mac_index) {
+               memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
+                      ETH_ALEN);
+       } else {
+               memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
+                      ETH_ALEN);
+       }
+       memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+       ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+       /* Turn off support for multicasting */
+       ndev->flags &= ~IFF_MULTICAST;
+
+       /* Record PCI bus information. */
+       ql_get_board_info(qdev);
+
+       /*
+        * Set the Maximum Memory Read Byte Count value. We do this to handle
+        * jumbo frames.
+        */
+       if (qdev->pci_x) {
+               pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+       }
+
+       err = register_netdev(ndev);
+       if (err) {
+               printk(KERN_ERR PFX "%s: cannot register net device\n",
+                      pci_name(pdev));
+               goto err_out_iounmap;
+       }
+
+       /* we're going to reset, so assume we have no link for now */
+
+       netif_carrier_off(ndev);
+       netif_stop_queue(ndev);
+
+       qdev->workqueue = create_singlethread_workqueue(ndev->name);
+       INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
+       INIT_WORK(&qdev->tx_timeout_work,
+                 (void (*)(void *))ql_tx_timeout_work, qdev);
+
+       init_timer(&qdev->adapter_timer);
+       qdev->adapter_timer.function = ql3xxx_timer;
+       qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+       qdev->adapter_timer.data = (unsigned long)qdev;
+
+       if(!cards_found) {
+               printk(KERN_ALERT PFX "%s\n", DRV_STRING);
+               printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
+                  DRV_NAME, DRV_VERSION);
+       }
+       ql_display_dev_info(ndev);
+
+       cards_found++;
+       return 0;
+
+err_out_iounmap:
+       iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+       free_netdev(ndev);
+err_out_free_regions:
+       pci_release_regions(pdev);
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+err_out:
+       return err;
+}
+
+static void __devexit ql3xxx_remove(struct pci_dev *pdev)
+{
+       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
+
+       unregister_netdev(ndev);
+       qdev = netdev_priv(ndev);
+
+       ql_disable_interrupts(qdev);
+
+       if (qdev->workqueue) {
+               cancel_delayed_work(&qdev->reset_work);
+               cancel_delayed_work(&qdev->tx_timeout_work);
+               destroy_workqueue(qdev->workqueue);
+               qdev->workqueue = NULL;
+       }
+
+       iounmap((void *)qdev->mmap_virt_base);
+       pci_release_regions(pdev);
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+
+       .name = DRV_NAME,
+       .id_table = ql3xxx_pci_tbl,
+       .probe = ql3xxx_probe,
+       .remove = __devexit_p(ql3xxx_remove),
+};
+
+static int __init ql3xxx_init_module(void)
+{
+       return pci_register_driver(&ql3xxx_driver);
+}
+
+static void __exit ql3xxx_exit(void)
+{
+       pci_unregister_driver(&ql3xxx_driver);
+}
+
+module_init(ql3xxx_init_module);
+module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644 (file)
index 0000000..9492cee
--- /dev/null
@@ -0,0 +1,1194 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c)  2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
+
+#define OPCODE_OB_MAC_IOCB_FN0          0x01
+#define OPCODE_OB_MAC_IOCB_FN2          0x21
+#define OPCODE_OB_TCP_IOCB_FN0          0x03
+#define OPCODE_OB_TCP_IOCB_FN2          0x23
+#define OPCODE_UPDATE_NCB_IOCB_FN0      0x00
+#define OPCODE_UPDATE_NCB_IOCB_FN2      0x20
+
+#define OPCODE_UPDATE_NCB_IOCB      0xF0
+#define OPCODE_IB_MAC_IOCB          0xF9
+#define OPCODE_IB_IP_IOCB           0xFA
+#define OPCODE_IB_TCP_IOCB          0xFB
+#define OPCODE_DUMP_PROTO_IOCB      0xFE
+#define OPCODE_BUFFER_ALERT_IOCB    0xFB
+
+#define OPCODE_FUNC_ID_MASK                 0x30
+#define OUTBOUND_MAC_IOCB                   0x01       /* plus function bits */
+#define OUTBOUND_TCP_IOCB                   0x03       /* plus function bits */
+#define UPDATE_NCB_IOCB                     0x00       /* plus function bits */
+
+#define FN0_MA_BITS_MASK    0x00
+#define FN1_MA_BITS_MASK    0x80
+
+struct ob_mac_iocb_req {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_REQ_MA  0xC0
+#define OB_MAC_IOCB_REQ_F   0x20
+#define OB_MAC_IOCB_REQ_X   0x10
+#define OB_MAC_IOCB_REQ_D   0x02
+#define OB_MAC_IOCB_REQ_I   0x01
+       __le16 reserved0;
+
+       __le32 transaction_id;
+       __le16 data_len;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
+       __le32 buf_addr0_low;
+       __le32 buf_addr0_high;
+       __le32 buf_0_len;
+       __le32 buf_addr1_low;
+       __le32 buf_addr1_high;
+       __le32 buf_1_len;
+       __le32 buf_addr2_low;
+       __le32 buf_addr2_high;
+       __le32 buf_2_len;
+       __le32 reserved4;
+       __le32 reserved5;
+};
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E   0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C   0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L   0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R   0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_RSP_P   0x08
+#define OB_MAC_IOCB_RSP_S   0x02
+#define OB_MAC_IOCB_RSP_I   0x01
+
+       __le16 reserved0;
+       __le32 transaction_id;
+       __le32 reserved1;
+       __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define IB_MAC_IOCB_RSP_S   0x80
+#define IB_MAC_IOCB_RSP_H1  0x40
+#define IB_MAC_IOCB_RSP_H0  0x20
+#define IB_MAC_IOCB_RSP_B   0x10
+#define IB_MAC_IOCB_RSP_M   0x08
+#define IB_MAC_IOCB_RSP_MA  0x07
+
+       __le16 length;
+       __le32 reserved;
+       __le32 ial_low;
+       __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+       u8 opcode;
+       __le16 flags;
+#define OB_IP_IOCB_REQ_O        0x100
+#define OB_IP_IOCB_REQ_H        0x008
+#define OB_IP_IOCB_REQ_U        0x004
+#define OB_IP_IOCB_REQ_D        0x002
+#define OB_IP_IOCB_REQ_I        0x001
+
+       u8 reserved0;
+
+       __le32 transaction_id;
+       __le16 data_len;
+       __le16 reserved1;
+       __le32 hncb_ptr_low;
+       __le32 hncb_ptr_high;
+       __le32 buf_addr0_low;
+       __le32 buf_addr0_high;
+       __le32 buf_0_len;
+       __le32 buf_addr1_low;
+       __le32 buf_addr1_high;
+       __le32 buf_1_len;
+       __le32 buf_addr2_low;
+       __le32 buf_addr2_high;
+       __le32 buf_2_len;
+       __le32 reserved2;
+       __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E    0x80000000
+#define OB_IP_IOCB_REQ_C    0x40000000
+#define OB_IP_IOCB_REQ_L    0x20000000
+#define OB_IP_IOCB_REQ_R    0x10000000
+
+struct ob_ip_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define OB_MAC_IOCB_RSP_E       0x08
+#define OB_MAC_IOCB_RSP_L       0x04
+#define OB_MAC_IOCB_RSP_S       0x02
+#define OB_MAC_IOCB_RSP_I       0x01
+
+       __le16 reserved0;
+       __le32 transaction_id;
+       __le32 reserved1;
+       __le32 reserved2;
+};
+
+struct ob_tcp_iocb_req {
+       u8 opcode;
+
+       u8 flags0;
+#define OB_TCP_IOCB_REQ_P       0x80
+#define OB_TCP_IOCB_REQ_CI      0x20
+#define OB_TCP_IOCB_REQ_H       0x10
+#define OB_TCP_IOCB_REQ_LN      0x08
+#define OB_TCP_IOCB_REQ_K       0x04
+#define OB_TCP_IOCB_REQ_D       0x02
+#define OB_TCP_IOCB_REQ_I       0x01
+
+       u8 flags1;
+#define OB_TCP_IOCB_REQ_OSM     0x40
+#define OB_TCP_IOCB_REQ_URG     0x20
+#define OB_TCP_IOCB_REQ_ACK     0x10
+#define OB_TCP_IOCB_REQ_PSH     0x08
+#define OB_TCP_IOCB_REQ_RST     0x04
+#define OB_TCP_IOCB_REQ_SYN     0x02
+#define OB_TCP_IOCB_REQ_FIN     0x01
+
+       u8 options_len;
+#define OB_TCP_IOCB_REQ_OMASK   0xF0
+#define OB_TCP_IOCB_REQ_SHIFT   4
+
+       __le32 transaction_id;
+       __le32 data_len;
+       __le32 hncb_ptr_low;
+       __le32 hncb_ptr_high;
+       __le32 buf_addr0_low;
+       __le32 buf_addr0_high;
+       __le32 buf_0_len;
+       __le32 buf_addr1_low;
+       __le32 buf_addr1_high;
+       __le32 buf_1_len;
+       __le32 buf_addr2_low;
+       __le32 buf_addr2_high;
+       __le32 buf_2_len;
+       __le32 time_stamp;
+       __le32 reserved1;
+};
+
+struct ob_tcp_iocb_rsp {
+       u8 opcode;
+
+       u8 flags0;
+#define OB_TCP_IOCB_RSP_C       0x20
+#define OB_TCP_IOCB_RSP_H       0x10
+#define OB_TCP_IOCB_RSP_LN      0x08
+#define OB_TCP_IOCB_RSP_K       0x04
+#define OB_TCP_IOCB_RSP_D       0x02
+#define OB_TCP_IOCB_RSP_I       0x01
+
+       u8 flags1;
+#define OB_TCP_IOCB_RSP_E       0x10
+#define OB_TCP_IOCB_RSP_W       0x08
+#define OB_TCP_IOCB_RSP_P       0x04
+#define OB_TCP_IOCB_RSP_T       0x02
+#define OB_TCP_IOCB_RSP_F       0x01
+
+       u8 state;
+#define OB_TCP_IOCB_RSP_SMASK   0xF0
+#define OB_TCP_IOCB_RSP_SHIFT   4
+
+       __le32 transaction_id;
+       __le32 local_ncb_ptr;
+       __le32 reserved0;
+};
+
+struct ib_ip_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define IB_IP_IOCB_RSP_S        0x80
+#define IB_IP_IOCB_RSP_H1       0x40
+#define IB_IP_IOCB_RSP_H0       0x20
+#define IB_IP_IOCB_RSP_B        0x10
+#define IB_IP_IOCB_RSP_M        0x08
+#define IB_IP_IOCB_RSP_MA       0x07
+
+       __le16 length;
+       __le16 checksum;
+       __le16 reserved;
+#define IB_IP_IOCB_RSP_R        0x01
+       __le32 ial_low;
+       __le32 ial_high;
+};
+
+struct ib_tcp_iocb_rsp {
+       u8 opcode;
+       u8 flags;
+#define IB_TCP_IOCB_RSP_P       0x80
+#define IB_TCP_IOCB_RSP_T       0x40
+#define IB_TCP_IOCB_RSP_D       0x20
+#define IB_TCP_IOCB_RSP_N       0x10
+#define IB_TCP_IOCB_RSP_IP      0x03
+#define IB_TCP_FLAG_MASK        0xf0
+#define IB_TCP_FLAG_IOCB_SYN    0x00
+
+#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
+
+       __le16 length;
+       __le32 hncb_ref_num;
+       __le32 ial_low;
+       __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+       u8 opcode;
+       u8 flags;
+       __le16 reserved0;
+       __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS   0x1e00
+#define PORT1_PHY_ADDRESS   0x1f00
+
+#define ETHERNET_CRC_SIZE   4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+/* 32-bit ispControlStatus */
+enum {
+       ISP_CONTROL_NP_MASK = 0x0003,
+       ISP_CONTROL_NP_PCSR = 0x0000,
+       ISP_CONTROL_NP_HMCR = 0x0001,
+       ISP_CONTROL_NP_LRAMCR = 0x0002,
+       ISP_CONTROL_NP_PSR = 0x0003,
+       ISP_CONTROL_RI = 0x0008,
+       ISP_CONTROL_CI = 0x0010,
+       ISP_CONTROL_PI = 0x0020,
+       ISP_CONTROL_IN = 0x0040,
+       ISP_CONTROL_BE = 0x0080,
+       ISP_CONTROL_FN_MASK = 0x0700,
+       ISP_CONTROL_FN0_NET = 0x0400,
+       ISP_CONTROL_FN0_SCSI = 0x0500,
+       ISP_CONTROL_FN1_NET = 0x0600,
+       ISP_CONTROL_FN1_SCSI = 0x0700,
+       ISP_CONTROL_LINK_DN_0 = 0x0800,
+       ISP_CONTROL_LINK_DN_1 = 0x1000,
+       ISP_CONTROL_FSR = 0x2000,
+       ISP_CONTROL_FE = 0x4000,
+       ISP_CONTROL_SR = 0x8000,
+};
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+       ISP_IMR_ENABLE_INT = 0x0004,
+       ISP_IMR_DISABLE_RESET_INT = 0x0008,
+       ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+       ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+       ISP_SERIAL_PORT_IF_CLK = 0x0001,
+       ISP_SERIAL_PORT_IF_CS = 0x0002,
+       ISP_SERIAL_PORT_IF_D0 = 0x0004,
+       ISP_SERIAL_PORT_IF_DI = 0x0008,
+       ISP_NVRAM_MASK = (0x000F << 16),
+       ISP_SERIAL_PORT_IF_WE = 0x0010,
+       ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+       ISP_SERIAL_PORT_IF_SCI = 0x0400,
+       ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+       ISP_SERIAL_PORT_IF_SCE = 0x1000,
+       ISP_SERIAL_PORT_IF_SDI = 0x2000,
+       ISP_SERIAL_PORT_IF_SDO = 0x4000,
+       ISP_SERIAL_PORT_IF_SDE = 0x8000,
+       ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
+enum {
+       QL_RESOURCE_MASK_BASE_CODE = 0x7,
+       QL_RESOURCE_BITS_BASE_CODE = 0x4,
+       QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+       QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+       QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+       QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+       QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+       QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+       QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+       QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+       QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+       QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
+
+/*
+ * QL3XXX memory-mapped registers
+ * QL3XXX has 4 "pages" of registers, each page occupying
+ * 256 bytes.  Each page has a "common" area at the start and then
+ * page-specific registers after that.
+ */
+struct ql3xxx_common_registers {
+       u32 MB0;                /* Offset 0x00 */
+       u32 MB1;                /* Offset 0x04 */
+       u32 MB2;                /* Offset 0x08 */
+       u32 MB3;                /* Offset 0x0c */
+       u32 MB4;                /* Offset 0x10 */
+       u32 MB5;                /* Offset 0x14 */
+       u32 MB6;                /* Offset 0x18 */
+       u32 MB7;                /* Offset 0x1c */
+       u32 flashBiosAddr;
+       u32 flashBiosData;
+       u32 ispControlStatus;
+       u32 ispInterruptMaskReg;
+       u32 serialPortInterfaceReg;
+       u32 semaphoreReg;
+       u32 reqQProducerIndex;
+       u32 rspQConsumerIndex;
+
+       u32 rxLargeQProducerIndex;
+       u32 rxSmallQProducerIndex;
+       u32 arcMadiCommand;
+       u32 arcMadiData;
+};
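The register structs above are meant to be laid directly over the ioremap()'d PCI BAR, so individual registers are reached as struct members. Below is a minimal sketch of that access pattern, assuming "base" is the mapped BAR and that the hardware layout matches struct ql3xxx_common_registers; example_read_isp_status is a hypothetical helper, not part of the driver:

#include <linux/io.h>

/* Read the 32-bit ispControlStatus register through the overlay. */
static u32 example_read_isp_status(void __iomem *base)
{
	struct ql3xxx_common_registers __iomem *regs = base;

	return readl(&regs->ispControlStatus);
}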
+
+enum {
+       EXT_HW_CONFIG_SP_MASK = 0x0006,
+       EXT_HW_CONFIG_SP_NONE = 0x0000,
+       EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+       EXT_HW_CONFIG_SP_ECC = 0x0004,
+       EXT_HW_CONFIG_SP_ECCx = 0x0006,
+       EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+       EXT_HW_CONFIG_SIZE_128M = 0x0000,
+       EXT_HW_CONFIG_SIZE_256M = 0x0020,
+       EXT_HW_CONFIG_SIZE_512M = 0x0040,
+       EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+       EXT_HW_CONFIG_PD = 0x0080,
+       EXT_HW_CONFIG_FW = 0x0200,
+       EXT_HW_CONFIG_US = 0x0400,
+       EXT_HW_CONFIG_DCS_MASK = 0x1800,
+       EXT_HW_CONFIG_DCS_9MA = 0x0000,
+       EXT_HW_CONFIG_DCS_15MA = 0x0800,
+       EXT_HW_CONFIG_DCS_18MA = 0x1000,
+       EXT_HW_CONFIG_DCS_24MA = 0x1800,
+       EXT_HW_CONFIG_DDS_MASK = 0x6000,
+       EXT_HW_CONFIG_DDS_9MA = 0x0000,
+       EXT_HW_CONFIG_DDS_15MA = 0x2000,
+       EXT_HW_CONFIG_DDS_18MA = 0x4000,
+       EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+       INTERNAL_CHIP_DM = 0x0001,
+       INTERNAL_CHIP_SD = 0x0002,
+       INTERNAL_CHIP_RAP_MASK = 0x000C,
+       INTERNAL_CHIP_RAP_RR = 0x0000,
+       INTERNAL_CHIP_RAP_NRM = 0x0004,
+       INTERNAL_CHIP_RAP_ERM = 0x0008,
+       INTERNAL_CHIP_RAP_ERMx = 0x000C,
+       INTERNAL_CHIP_WE = 0x0010,
+       INTERNAL_CHIP_EF = 0x0020,
+       INTERNAL_CHIP_FR = 0x0040,
+       INTERNAL_CHIP_FW = 0x0080,
+       INTERNAL_CHIP_FI = 0x0100,
+       INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+       PORT_CONTROL_DS = 0x0001,
+       PORT_CONTROL_HH = 0x0002,
+       PORT_CONTROL_EI = 0x0004,
+       PORT_CONTROL_ET = 0x0008,
+       PORT_CONTROL_EF = 0x0010,
+       PORT_CONTROL_DRM = 0x0020,
+       PORT_CONTROL_RLB = 0x0040,
+       PORT_CONTROL_RCB = 0x0080,
+       PORT_CONTROL_MAC = 0x0100,
+       PORT_CONTROL_IPV = 0x0200,
+       PORT_CONTROL_IFP = 0x0400,
+       PORT_CONTROL_ITP = 0x0800,
+       PORT_CONTROL_FI = 0x1000,
+       PORT_CONTROL_DFP = 0x2000,
+       PORT_CONTROL_OI = 0x4000,
+       PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+       PORT_STATUS_SM0 = 0x0001,
+       PORT_STATUS_SM1 = 0x0002,
+       PORT_STATUS_X = 0x0008,
+       PORT_STATUS_DL = 0x0080,
+       PORT_STATUS_IC = 0x0200,
+       PORT_STATUS_MRC = 0x0400,
+       PORT_STATUS_NL = 0x0800,
+       PORT_STATUS_REV_ID_MASK = 0x7000,
+       PORT_STATUS_REV_ID_1 = 0x1000,
+       PORT_STATUS_REV_ID_2 = 0x2000,
+       PORT_STATUS_REV_ID_3 = 0x3000,
+       PORT_STATUS_64 = 0x8000,
+       PORT_STATUS_UP0 = 0x10000,
+       PORT_STATUS_AC0 = 0x20000,
+       PORT_STATUS_AE0 = 0x40000,
+       PORT_STATUS_UP1 = 0x100000,
+       PORT_STATUS_AC1 = 0x200000,
+       PORT_STATUS_AE1 = 0x400000,
+       PORT_STATUS_F0_ENABLED = 0x1000000,
+       PORT_STATUS_F1_ENABLED = 0x2000000,
+       PORT_STATUS_F2_ENABLED = 0x4000000,
+       PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+       MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+       MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+       MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+       MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+       MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+       MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+       MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+       MAC_MII_CONTROL_RC = 0x0001,
+       MAC_MII_CONTROL_SC = 0x0002,
+       MAC_MII_CONTROL_AS = 0x0004,
+       MAC_MII_CONTROL_NP = 0x0008,
+       MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+       MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+       MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+       MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+       MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+       MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+       MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+       MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+       MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+       MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+       MAC_MII_STATUS_BSY = 0x0001,
+       MAC_MII_STATUS_SC = 0x0002,
+       MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+       MAC_CONFIG_REG_PE = 0x0001,
+       MAC_CONFIG_REG_TF = 0x0002,
+       MAC_CONFIG_REG_RF = 0x0004,
+       MAC_CONFIG_REG_FD = 0x0008,
+       MAC_CONFIG_REG_GM = 0x0010,
+       MAC_CONFIG_REG_LB = 0x0020,
+       MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+       MAC_HALF_DUPLEX_REG_ED = 0x10000,
+       MAC_HALF_DUPLEX_REG_NB = 0x20000,
+       MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+       MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+       IP_ADDR_INDEX_REG_MASK = 0x000f,
+       IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+       IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+       IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+       IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+       IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+       IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+       IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+       IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+};
+
+enum {
+       PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+       PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+       PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+       PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+       PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+       PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+       PROBE_MUX_ADDR_REG_UP = 0x4000,
+       PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+       STATISTICS_INDEX_REG_MASK = 0x01ff,
+       STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+       STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+       STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+       STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+       STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+       STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+       STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+       STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+       STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+       STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+       STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+       STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+       STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+       STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+       STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+       STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+       STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+       STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+       STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+       STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+       STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+       STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+       STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+       STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+       STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+       STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+       STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+       STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+       STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+       STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+       STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+       STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+       STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+       STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+       PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+       PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+       PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+       PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+       PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+       PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+       PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+       PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+       PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+       PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+       PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+       PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+       PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+       PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+       PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+       PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+       PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+       PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+       PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+       PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+       PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+       PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+       PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+       PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+       PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ *  port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+       struct ql3xxx_common_registers CommonRegs;
+
+       u32 ExternalHWConfig;
+       u32 InternalChipConfig;
+       u32 portControl;
+       u32 portStatus;
+       u32 macAddrIndirectPtrReg;
+       u32 macAddrDataReg;
+       u32 macMIIMgmtControlReg;
+       u32 macMIIMgmtAddrReg;
+       u32 macMIIMgmtDataReg;
+       u32 macMIIStatusReg;
+       u32 mac0ConfigReg;
+       u32 mac0IpgIfgReg;
+       u32 mac0HalfDuplexReg;
+       u32 mac0MaxFrameLengthReg;
+       u32 mac0PauseThresholdReg;
+       u32 mac1ConfigReg;
+       u32 mac1IpgIfgReg;
+       u32 mac1HalfDuplexReg;
+       u32 mac1MaxFrameLengthReg;
+       u32 mac1PauseThresholdReg;
+       u32 ipAddrIndexReg;
+       u32 ipAddrDataReg;
+       u32 ipReassemblyTimeout;
+       u32 tcpMaxWindow;
+       u32 currentTcpTimestamp[2];
+       u32 internalRamRWAddrReg;
+       u32 internalRamWDataReg;
+       u32 reclaimedBufferAddrRegLow;
+       u32 reclaimedBufferAddrRegHigh;
+       u32 reserved[2];
+       u32 fpgaRevID;
+       u32 localRamAddr;
+       u32 localRamDataAutoIncr;
+       u32 localRamDataNonIncr;
+       u32 gpOutput;
+       u32 gpInput;
+       u32 probeMuxAddr;
+       u32 probeMuxData;
+       u32 statisticsIndexReg;
+       u32 statisticsReadDataRegAutoIncr;
+       u32 statisticsReadDataRegNoIncr;
+       u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+       struct ql3xxx_common_registers CommonRegs;
+
+       u32 reserved[12];
+
+       /* Network Request Queue */
+       u32 reqConsumerIndex;
+       u32 reqConsumerIndexAddrLow;
+       u32 reqConsumerIndexAddrHigh;
+       u32 reqBaseAddrLow;
+       u32 reqBaseAddrHigh;
+       u32 reqLength;
+
+       /* Network Completion Queue */
+       u32 rspProducerIndex;
+       u32 rspProducerIndexAddrLow;
+       u32 rspProducerIndexAddrHigh;
+       u32 rspBaseAddrLow;
+       u32 rspBaseAddrHigh;
+       u32 rspLength;
+
+       /* RX Large Buffer Queue */
+       u32 rxLargeQConsumerIndex;
+       u32 rxLargeQBaseAddrLow;
+       u32 rxLargeQBaseAddrHigh;
+       u32 rxLargeQLength;
+       u32 rxLargeBufferLength;
+
+       /* RX Small Buffer Queue */
+       u32 rxSmallQConsumerIndex;
+       u32 rxSmallQBaseAddrLow;
+       u32 rxSmallQBaseAddrHigh;
+       u32 rxSmallQLength;
+       u32 rxSmallBufferLength;
+
+};
+
+/*
+ *  port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+       struct ql3xxx_common_registers CommonRegs;
+       u32 bufletSize;
+       u32 maxBufletCount;
+       u32 currentBufletCount;
+       u32 reserved;
+       u32 freeBufletThresholdLow;
+       u32 freeBufletThresholdHigh;
+       u32 ipHashTableBase;
+       u32 ipHashTableCount;
+       u32 tcpHashTableBase;
+       u32 tcpHashTableCount;
+       u32 ncbBase;
+       u32 maxNcbCount;
+       u32 currentNcbCount;
+       u32 drbBase;
+       u32 maxDrbCount;
+       u32 currentDrbCount;
+};
+
+/*
+ * Helpers for splitting a 64-bit value (e.g. a DMA address) into 32-bit halves
+ */
+
+#define LS_64BITS(x)    (u32)(0xffffffff & ((u64)x))
+#define MS_64BITS(x)    (u32)(0xffffffff & (((u64)x)>>16>>16))
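The IOCB and buffer-queue structures carry 64-bit bus addresses as separate low/high 32-bit words, which is exactly what these two macros produce. A small hypothetical sketch of their intended use; example_split_address and "mapping" are placeholders, not driver code:

#include <linux/types.h>

/* Split a 64-bit value into the low/high words the hardware expects. */
static void example_split_address(u64 mapping, u32 *low, u32 *high)
{
	*low = LS_64BITS(mapping);	/* bits 31:0 */
	*high = MS_64BITS(mapping);	/* bits 63:32 */
}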
+
+/*
+ * I/O register
+ */
+
+enum {
+       CONTROL_REG = 0,
+       STATUS_REG = 1,
+       PHY_STAT_LINK_UP = 0x0004,
+       PHY_CTRL_LOOPBACK = 0x4000,
+
+       PETBI_CONTROL_REG = 0x00,
+       PETBI_CTRL_SOFT_RESET = 0x8000,
+       PETBI_CTRL_AUTO_NEG = 0x1000,
+       PETBI_CTRL_RESTART_NEG = 0x0200,
+       PETBI_CTRL_FULL_DUPLEX = 0x0100,
+       PETBI_CTRL_SPEED_1000 = 0x0040,
+
+       PETBI_STATUS_REG = 0x01,
+       PETBI_STAT_NEG_DONE = 0x0020,
+       PETBI_STAT_LINK_UP = 0x0004,
+
+       PETBI_NEG_ADVER = 0x04,
+       PETBI_NEG_PAUSE = 0x0080,
+       PETBI_NEG_PAUSE_MASK = 0x0180,
+       PETBI_NEG_DUPLEX = 0x0020,
+       PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+       PETBI_NEG_PARTNER = 0x05,
+       PETBI_NEG_ERROR_MASK = 0x3000,
+
+       PETBI_EXPANSION_REG = 0x06,
+       PETBI_EXP_PAGE_RX = 0x0002,
+
+       PETBI_TBI_CTRL = 0x11,
+       PETBI_TBI_RESET = 0x8000,
+       PETBI_TBI_AUTO_SENSE = 0x0100,
+       PETBI_TBI_SERDES_MODE = 0x0010,
+       PETBI_TBI_SERDES_WRAP = 0x0002,
+
+       AUX_CONTROL_STATUS = 0x1c,
+       PHY_AUX_NEG_DONE = 0x8000,
+       PHY_NEG_PARTNER = 5,
+       PHY_AUX_DUPLEX_STAT = 0x0020,
+       PHY_AUX_SPEED_STAT = 0x0018,
+       PHY_AUX_NO_HW_STRAP = 0x0004,
+       PHY_AUX_RESET_STICK = 0x0002,
+       PHY_NEG_PAUSE = 0x0400,
+       PHY_CTRL_SOFT_RESET = 0x8000,
+       PHY_NEG_ADVER = 4,
+       PHY_NEG_ADV_SPEED = 0x01e0,
+       PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions   */
+       FM93C56A_START = 0x1,
+/* Commands */
+       FM93C56A_READ = 0x2,
+       FM93C56A_WEN = 0x0,
+       FM93C56A_WRITE = 0x1,
+       FM93C56A_WRITE_ALL = 0x0,
+       FM93C56A_WDS = 0x0,
+       FM93C56A_ERASE = 0x3,
+       FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+       FM93C56A_WEN_EXT = 0x3,
+       FM93C56A_WRITE_ALL_EXT = 0x1,
+       FM93C56A_WDS_EXT = 0x0,
+       FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+       FM93C56A_READ_DUMMY_BITS = 1,
+       FM93C56A_READY = 0,
+       FM93C56A_BUSY = 1,
+       FM93C56A_CMD_BITS = 2,
+/* EEPROM size definitions     */
+       FM93C56A_SIZE_8 = 0x100,
+       FM93C56A_SIZE_16 = 0x80,
+       FM93C66A_SIZE_8 = 0x200,
+       FM93C66A_SIZE_16 = 0x100,
+       FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+       FM93C56A_NO_ADDR_BITS_16 = 8,
+       FM93C56A_NO_ADDR_BITS_8 = 9,
+       FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+       FM93C56A_DATA_BITS_16 = 16,
+       FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+       AUBURN_EEPROM_DI = 0x8,
+       AUBURN_EEPROM_DI_0 = 0x0,
+       AUBURN_EEPROM_DI_1 = 0x8,
+       AUBURN_EEPROM_DO = 0x4,
+       AUBURN_EEPROM_DO_0 = 0x0,
+       AUBURN_EEPROM_DO_1 = 0x4,
+       AUBURN_EEPROM_CS = 0x2,
+       AUBURN_EEPROM_CS_0 = 0x0,
+       AUBURN_EEPROM_CS_1 = 0x2,
+       AUBURN_EEPROM_CLK_RISE = 0x1,
+       AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {
+       EEPROM_SIZE = FM93C86A_SIZE_16,
+       EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+       EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ *  MAC Config data structure
+ */
+struct eeprom_port_cfg {
+       u16 etherMtu_mac;
+       u16 pauseThreshold_mac;
+       u16 resumeThreshold_mac;
+       u16 portConfiguration;
+#define PORT_CONFIG_AUTO_NEG_ENABLED        0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED       0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED     0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED     0x1000
+#define PORT_CONFIG_1000MB_SPEED            0x0400
+#define PORT_CONFIG_100MB_SPEED             0x0200
+#define PORT_CONFIG_10MB_SPEED              0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK         0x0F00
+       u16 reserved[12];
+
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+       u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+       u8 bootID0:7, boodID0Valid:1;
+       u8 bootLun0[8];
+
+       u8 bootID1:7, boodID1Valid:1;
+       u8 bootLun1[8];
+
+       u16 MaxLunsTrgt;
+       u8 reserved[10];
+};
+
+/*
+ *  Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+       u8 reserved[30];
+       u8 macAddress[6];
+       u8 macAddressSecondary[6];
+
+       u16 subsysVendorId;
+       u16 subsysDeviceId;
+};
+
+/*
+ *  EEPROM format
+ */
+struct eeprom_data {
+       u8 asicId[4];
+       u8 version;
+       u8 numPorts;
+       u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE   16
+#define EEPROM_SERIAL_NUM_SIZE    16
+
+       u8 boardIdStr[16];
+       u8 serialNumber[16];
+       u16 extHwConfig;
+       struct eeprom_port_cfg macCfg_port0;
+       struct eeprom_port_cfg macCfg_port1;
+       u16 bufletSize;
+       u16 bufletCount;
+       u16 tcpWindowThreshold50;
+       u16 tcpWindowThreshold25;
+       u16 tcpWindowThreshold0;
+       u16 ipHashTableBaseHi;
+       u16 ipHashTableBaseLo;
+       u16 ipHashTableSize;
+       u16 tcpHashTableBaseHi;
+       u16 tcpHashTableBaseLo;
+       u16 tcpHashTableSize;
+       u16 ncbTableBaseHi;
+       u16 ncbTableBaseLo;
+       u16 ncbTableSize;
+       u16 drbTableBaseHi;
+       u16 drbTableBaseLo;
+       u16 drbTableSize;
+       u16 reserved_142[4];
+       u16 ipReassemblyTimeout;
+       u16 tcpMaxWindowSize;
+       u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+       u8 reserved_156[294];
+       u16 qDebug[8];
+       struct eeprom_function_cfg funcCfg_fn0;
+       u16 reserved_510;
+       u8 oemSpace[432];
+       struct eeprom_bios_cfg biosCfg_fn1;
+       struct eeprom_function_cfg funcCfg_fn1;
+       u16 reserved_1022;
+       u8 reserved_1024[464];
+       struct eeprom_function_cfg funcCfg_fn2;
+       u16 reserved_1534;
+       u8 reserved_1536[432];
+       struct eeprom_bios_cfg biosCfg_fn3;
+       struct eeprom_function_cfg funcCfg_fn3;
+       u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+#define QL3XXX_VENDOR_ID    0x1077
+#define QL3022_DEVICE_ID    0x3022
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE                ETH_DATA_LEN
+#define JUMBO_MTU_SIZE                 9000
+#define VLAN_ID_LEN                    2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES   256        /* so that 64 * 64  = 4096 (1 page) */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES   256        /* so that 256 * 16  = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES      128
+#define NUM_SBUFQ_ENTRIES      64
+#define QL_SMALL_BUFFER_SIZE    32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+/* Each send has at least one control block.  This is how many we keep. */
+#define NUM_SMALL_BUFFERS      (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
+#define NUM_LARGE_BUFFERS      (NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
+#define QL_HEADER_SPACE 32     /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+       u32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK  0x00000003
+       u32 addr0_upper;
+       u32 addr1_lower;
+       u32 addr1_upper;
+       u32 addr2_lower;
+       u32 addr2_upper;
+       u32 addr3_lower;
+       u32 addr3_upper;
+       u32 addr4_lower;
+       u32 addr4_upper;
+       u32 addr5_lower;
+       u32 addr5_upper;
+       u32 addr6_lower;
+       u32 addr6_upper;
+       u32 addr7_lower;
+       u32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+       u32 addr_low;
+       u32 addr_high;
+};
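For orientation, the buffer counts above work out as follows (assuming the two structures pack with no padding, which their all-u32 layout suggests): sizeof(struct lrg_buf_q_entry) is 16 * 4 = 64 bytes and sizeof(struct bufq_addr_element) is 8 bytes, so QL_ADDR_ELE_PER_BUFQ_ENTRY evaluates to 8, giving NUM_SMALL_BUFFERS = 64 * 8 = 512 and NUM_LARGE_BUFFERS = 128 * 8 = 1024 receive buffers.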
+
+#define QL_NO_RESET                    0
+#define QL_DO_RESET                    1
+
+enum link_state_t {
+       LS_UNKNOWN = 0,
+       LS_DOWN,
+       LS_DEGRADE,
+       LS_RECOVER,
+       LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+       struct ql_rcv_buf_cb *next;
+       struct sk_buff *skb;
+       DECLARE_PCI_UNMAP_ADDR(mapaddr);
+       DECLARE_PCI_UNMAP_LEN(maplen);
+       __le32 buf_phy_addr_low;
+       __le32 buf_phy_addr_high;
+       int index;
+};
+
+struct ql_tx_buf_cb {
+       struct sk_buff *skb;
+       struct ob_mac_iocb_req *queue_entry;
+       DECLARE_PCI_UNMAP_ADDR(mapaddr);
+       DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB  0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1,      /* Reset finished. */
+       QL_RESET_ACTIVE = 2,    /* Waiting for reset to finish. */
+       QL_RESET_START = 3,     /* Please reset the chip. */
+       QL_RESET_PER_SCSI = 4,  /* SCSI driver requests reset. */
+       QL_TX_TIMEOUT = 5,      /* Timeout in progress. */
+       QL_LINK_MASTER = 6,     /* This driver controls the link. */
+       QL_ADAPTER_UP = 7,      /* Adapter has been brought up. */
+       QL_THREAD_UP = 8,       /* This flag is available. */
+       QL_LINK_UP = 9, /* Link Status. */
+       QL_ALLOC_REQ_RSP_Q_DONE = 10,
+       QL_ALLOC_BUFQS_DONE = 11,
+       QL_ALLOC_SMALL_BUF_DONE = 12,
+       QL_LINK_OPTICAL = 13,
+       QL_MSI_ENABLED = 14,
+};
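The values in this enum read like bit numbers for the adapter's "unsigned long flags" word (declared further down in struct ql3_adapter), so the usual atomic bit helpers apply. A hypothetical sketch of that usage, assuming the driver follows the common set_bit/test_bit convention; example_mark_reset_active is a placeholder name:

#include <linux/bitops.h>

/* Mark a reset as in progress and note whether the adapter was up. */
static int example_mark_reset_active(unsigned long *flags)
{
	set_bit(QL_RESET_ACTIVE, flags);

	return test_and_clear_bit(QL_ADAPTER_UP, flags);
}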
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+       u32 reserved_00;
+       unsigned long flags;
+
+       /* PCI Configuration information for this device */
+       struct pci_dev *pdev;
+       struct net_device *ndev;        /* Parent NET device */
+
+       /* Hardware information */
+       u8 chip_rev_id;
+       u8 pci_slot;
+       u8 pci_width;
+       u8 pci_x;
+       u32 msi;
+       int index;
+       struct timer_list adapter_timer;        /* timer used for various functions */
+
+       spinlock_t adapter_lock;
+       spinlock_t hw_lock;
+
+       /* PCI Bus Relative Register Addresses */
+       u8 *mmap_virt_base;     /* stores return value from ioremap() */
+       struct ql3xxx_port_registers __iomem *mem_map_registers;
+       u32 current_page;       /* tracks current register page */
+
+       u32 msg_enable;
+       u8 reserved_01[2];
+       u8 reserved_02[2];
+
+       /* Page for Shadow Registers */
+       void *shadow_reg_virt_addr;
+       dma_addr_t shadow_reg_phy_addr;
+
+       /* Net Request Queue */
+       u32 req_q_size;
+       u32 reserved_03;
+       struct ob_mac_iocb_req *req_q_virt_addr;
+       dma_addr_t req_q_phy_addr;
+       u16 req_producer_index;
+       u16 reserved_04;
+       u16 *preq_consumer_index;
+       u32 req_consumer_index_phy_addr_high;
+       u32 req_consumer_index_phy_addr_low;
+       atomic_t tx_count;
+       struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+       /* Net Response Queue */
+       u32 rsp_q_size;
+       u32 eeprom_cmd_data;
+       struct net_rsp_iocb *rsp_q_virt_addr;
+       dma_addr_t rsp_q_phy_addr;
+       struct net_rsp_iocb *rsp_current;
+       u16 rsp_consumer_index;
+       u16 reserved_06;
+       u32 *prsp_producer_index;
+       u32 rsp_producer_index_phy_addr_high;
+       u32 rsp_producer_index_phy_addr_low;
+
+       /* Large Buffer Queue */
+       u32 lrg_buf_q_alloc_size;
+       u32 lrg_buf_q_size;
+       void *lrg_buf_q_alloc_virt_addr;
+       void *lrg_buf_q_virt_addr;
+       dma_addr_t lrg_buf_q_alloc_phy_addr;
+       dma_addr_t lrg_buf_q_phy_addr;
+       u32 lrg_buf_q_producer_index;
+       u32 lrg_buf_release_cnt;
+       struct bufq_addr_element *lrg_buf_next_free;
+
+       /* Large (Receive) Buffers */
+       struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
+       struct ql_rcv_buf_cb *lrg_buf_free_head;
+       struct ql_rcv_buf_cb *lrg_buf_free_tail;
+       u32 lrg_buf_free_count;
+       u32 lrg_buffer_len;
+       u32 lrg_buf_index;
+       u32 lrg_buf_skb_check;
+
+       /* Small Buffer Queue */
+       u32 small_buf_q_alloc_size;
+       u32 small_buf_q_size;
+       u32 small_buf_q_producer_index;
+       void *small_buf_q_alloc_virt_addr;
+       void *small_buf_q_virt_addr;
+       dma_addr_t small_buf_q_alloc_phy_addr;
+       dma_addr_t small_buf_q_phy_addr;
+       u32 small_buf_index;
+
+       /* Small (Receive) Buffers */
+       void *small_buf_virt_addr;
+       dma_addr_t small_buf_phy_addr;
+       u32 small_buf_phy_addr_low;
+       u32 small_buf_phy_addr_high;
+       u32 small_buf_release_cnt;
+       u32 small_buf_total_size;
+
+       /* ISR related, saves status for DPC. */
+       u32 control_status;
+
+       struct eeprom_data nvram_data;
+       struct timer_list ioctl_timer;
+       u32 port_link_state;
+       u32 last_rsp_offset;
+
+       /* 4022 specific */
+       u32 mac_index;          /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
+       u32 PHYAddr;            /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
+       u32 mac_ob_opcode;      /* Opcode to use on mac transmission */
+       u32 tcp_ob_opcode;      /* Opcode to use on tcp transmission */
+       u32 update_ob_opcode;   /* Opcode to use for updating NCB */
+       u32 mb_bit_mask;        /* MA Bits mask to use on transmission */
+       u32 numPorts;
+       struct net_device_stats stats;
+       struct workqueue_struct *workqueue;
+       struct work_struct reset_work;
+       struct work_struct tx_timeout_work;
+       u32 max_frame_size;
+};
+
+#endif                         /* _QLA3XXX_H_ */
index 4c2f575..5722a56 100644 (file)
@@ -2809,7 +2809,7 @@ static struct pci_driver rtl8169_pci_driver = {
 static int __init
 rtl8169_init_module(void)
 {
-       return pci_module_init(&rtl8169_pci_driver);
+       return pci_register_driver(&rtl8169_pci_driver);
 }
 
 static void __exit
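This and many of the following hunks repeat one mechanical conversion: module init paths switch from the deprecated pci_module_init() wrapper to pci_register_driver(). A minimal sketch of the resulting registration pattern; example_driver and its empty callbacks are placeholders, not code from any of the drivers touched here:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= NULL,		/* would list the supported PCI IDs */
	.probe		= NULL,		/* would point at the probe routine */
	.remove		= NULL,
};

static int __init example_init(void)
{
	/* pci_register_driver() replaces the old pci_module_init() wrapper */
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);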
index c3ed734..31bcdad 100644 (file)
@@ -1736,7 +1736,7 @@ static struct pci_driver rr_driver = {
 
 static int __init rr_init_module(void)
 {
-       return pci_module_init(&rr_driver);
+       return pci_register_driver(&rr_driver);
 }
 
 static void __exit rr_cleanup_module(void)
index e72e0e0..c16f915 100644 (file)
@@ -7233,7 +7233,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
 
 int __init s2io_starter(void)
 {
-       return pci_module_init(&s2io_driver);
+       return pci_register_driver(&s2io_driver);
 }
 
 /**
index b2acedb..c479b07 100644 (file)
@@ -1131,7 +1131,7 @@ static struct pci_driver saa9730_driver = {
 
 static int __init saa9730_init(void)
 {
-       return pci_module_init(&saa9730_driver);
+       return pci_register_driver(&saa9730_driver);
 }
 
 static void __exit saa9730_cleanup(void)
index 9ab1618..e4c8896 100644 (file)
@@ -2708,7 +2708,6 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
 static void sbmac_set_rx_mode(struct net_device *dev)
 {
        unsigned long flags;
-       int msg_flag = 0;
        struct sbmac_softc *sc = netdev_priv(dev);
 
        spin_lock_irqsave(&sc->sbm_lock, flags);
@@ -2718,22 +2717,14 @@ static void sbmac_set_rx_mode(struct net_device *dev)
                 */
 
                if (dev->flags & IFF_PROMISC) {
-                       /* Unconditionally log net taps. */
-                       msg_flag = 1;
                        sbmac_promiscuous_mode(sc,1);
                }
                else {
-                       msg_flag = 2;
                        sbmac_promiscuous_mode(sc,0);
                }
        }
        spin_unlock_irqrestore(&sc->sbm_lock, flags);
 
-       if (msg_flag) {
-               printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
-                      dev->name,(msg_flag==1)?"en":"dis");
-       }
-
        /*
         * Program the multicasts.  Do this every time.
         */
index df0cbeb..7c1982a 100644 (file)
@@ -821,9 +821,6 @@ static void sis190_set_rx_mode(struct net_device *dev)
        u16 rx_mode;
 
        if (dev->flags & IFF_PROMISC) {
-               /* Unconditionally log net taps. */
-               net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                       dev->name);
                rx_mode =
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                        AcceptAllPhys;
@@ -1871,7 +1868,7 @@ static struct pci_driver sis190_pci_driver = {
 
 static int __init sis190_init_module(void)
 {
-       return pci_module_init(&sis190_pci_driver);
+       return pci_register_driver(&sis190_pci_driver);
 }
 
 static void __exit sis190_cleanup_module(void)
index 29ee7ff..6af5028 100644 (file)
@@ -134,6 +134,7 @@ static const struct mii_chip_info {
        { "AMD 79C901 10BASE-T PHY",            0x0000, 0x6B70, LAN },
        { "AMD 79C901 HomePNA PHY",             0x0000, 0x6B90, HOME},
        { "ICS LAN PHY",                        0x0015, 0xF440, LAN },
+       { "ICS LAN PHY",                        0x0143, 0xBC70, LAN },
        { "NS 83851 PHY",                       0x2000, 0x5C20, MIX },
        { "NS 83847 PHY",                       0x2000, 0x5C30, MIX },
        { "Realtek RTL8201 PHY",                0x0000, 0x8200, LAN },
@@ -2495,7 +2496,7 @@ static int __init sis900_init_module(void)
        printk(version);
 #endif
 
-       return pci_module_init(&sis900_pci_driver);
+       return pci_register_driver(&sis900_pci_driver);
 }
 
 static void __exit sis900_cleanup_module(void)
index ee62845..49e76c7 100644 (file)
@@ -5133,7 +5133,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init(void)
 {
-       return pci_module_init(&skge_driver);
+       return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_exit(void)
index b5714a6..8e4d184 100644 (file)
@@ -2280,7 +2280,7 @@ static struct pci_driver skfddi_pci_driver = {
 
 static int __init skfd_init(void)
 {
-       return pci_module_init(&skfddi_pci_driver);
+       return pci_register_driver(&skfddi_pci_driver);
 }
 
 static void __exit skfd_exit(void)
index ad878df..fba8b74 100644 (file)
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME               "skge"
-#define DRV_VERSION            "1.6"
+#define DRV_VERSION            "1.8"
 #define PFX                    DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE   128
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
 static void skge_phy_reset(struct skge_port *skge);
-static void skge_tx_clean(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -818,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge)
 /* Allocate buffers for receive ring
  * For receive:  to_clean is next received frame.
  */
-static int skge_rx_fill(struct skge_port *skge)
+static int skge_rx_fill(struct net_device *dev)
 {
+       struct skge_port *skge = netdev_priv(dev);
        struct skge_ring *ring = &skge->rx_ring;
        struct skge_element *e;
 
@@ -827,7 +829,8 @@ static int skge_rx_fill(struct skge_port *skge)
        do {
                struct sk_buff *skb;
 
-               skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+               skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+                                        GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
 
@@ -2178,7 +2181,7 @@ static int skge_up(struct net_device *dev)
        if (err)
                goto free_pci_mem;
 
-       err = skge_rx_fill(skge);
+       err = skge_rx_fill(dev);
        if (err)
                goto free_rx_ring;
 
@@ -2281,7 +2284,7 @@ static int skge_down(struct net_device *dev)
        skge_led(skge, LED_MODE_OFF);
 
        netif_poll_disable(dev);
-       skge_tx_clean(skge);
+       skge_tx_clean(dev);
        skge_rx_clean(skge);
 
        kfree(skge->rx_ring.start);
@@ -2306,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        int i;
        u32 control, len;
        u64 map;
-       unsigned long flags;
 
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
 
-       if (!spin_trylock_irqsave(&skge->tx_lock, flags))
-               /* Collision - tell upper layer to requeue */
-               return NETDEV_TX_LOCKED;
-
-       if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
-
-                       printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-                              dev->name);
-               }
-               spin_unlock_irqrestore(&skge->tx_lock, flags);
+       if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
                return NETDEV_TX_BUSY;
-       }
 
        e = skge->tx_ring.to_use;
        td = e->desc;
@@ -2399,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
        }
 
-       spin_unlock_irqrestore(&skge->tx_lock, flags);
-
        dev->trans_start = jiffies;
 
        return NETDEV_TX_OK;
@@ -2430,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
                        printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
                               skge->netdev->name, e - skge->tx_ring.start);
 
-               dev_kfree_skb_any(e->skb);
+               dev_kfree_skb(e->skb);
        }
        e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
-static void skge_tx_clean(struct skge_port *skge)
+static void skge_tx_clean(struct net_device *dev)
 {
+       struct skge_port *skge = netdev_priv(dev);
        struct skge_element *e;
-       unsigned long flags;
 
-       spin_lock_irqsave(&skge->tx_lock, flags);
+       netif_tx_lock_bh(dev);
        for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
                struct skge_tx_desc *td = e->desc;
                skge_tx_free(skge, e, td->control);
@@ -2449,8 +2437,8 @@ static void skge_tx_clean(struct skge_port *skge)
        }
 
        skge->tx_ring.to_clean = e;
-       netif_wake_queue(skge->netdev);
-       spin_unlock_irqrestore(&skge->tx_lock, flags);
+       netif_wake_queue(dev);
+       netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -2461,7 +2449,7 @@ static void skge_tx_timeout(struct net_device *dev)
                printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
 
        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
-       skge_tx_clean(skge);
+       skge_tx_clean(dev);
 }
 
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
@@ -2584,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
-static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
-                                         struct skge_element *e,
-                                         u32 control, u32 status, u16 csum)
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+                                  struct skge_element *e,
+                                  u32 control, u32 status, u16 csum)
 {
+       struct skge_port *skge = netdev_priv(dev);
        struct sk_buff *skb;
        u16 len = control & BMU_BBC;
 
        if (unlikely(netif_msg_rx_status(skge)))
                printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-                      skge->netdev->name, e - skge->rx_ring.start,
+                      dev->name, e - skge->rx_ring.start,
                       status, len);
 
        if (len > skge->rx_buf_size)
@@ -2609,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
                goto error;
 
        if (len < RX_COPY_THRESHOLD) {
-               skb = alloc_skb(len + 2, GFP_ATOMIC);
+               skb = netdev_alloc_skb(dev, len + 2);
                if (!skb)
                        goto resubmit;
 
@@ -2624,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
                struct sk_buff *nskb;
-               nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+               nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
                if (!nskb)
                        goto resubmit;
 
@@ -2639,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
        }
 
        skb_put(skb, len);
-       skb->dev = skge->netdev;
        if (skge->rx_csum) {
                skb->csum = csum;
                skb->ip_summed = CHECKSUM_HW;
        }
 
-       skb->protocol = eth_type_trans(skb, skge->netdev);
+       skb->protocol = eth_type_trans(skb, dev);
 
        return skb;
 error:
 
        if (netif_msg_rx_err(skge))
                printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
-                      skge->netdev->name, e - skge->rx_ring.start,
+                      dev->name, e - skge->rx_ring.start,
                       control, status);
 
        if (skge->hw->chip_id == CHIP_ID_GENESIS) {
@@ -2677,15 +2665,15 @@ resubmit:
 }
 
 /* Free all buffers in Tx ring which are no longer owned by device */
-static void skge_txirq(struct net_device *dev)
+static void skge_tx_done(struct net_device *dev)
 {
        struct skge_port *skge = netdev_priv(dev);
        struct skge_ring *ring = &skge->tx_ring;
        struct skge_element *e;
 
-       rmb();
+       skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-       spin_lock(&skge->tx_lock);
+       netif_tx_lock(dev);
        for (e = ring->to_clean; e != ring->to_use; e = e->next) {
                struct skge_tx_desc *td = e->desc;
 
@@ -2696,11 +2684,10 @@ static void skge_txirq(struct net_device *dev)
        }
        skge->tx_ring.to_clean = e;
 
-       if (netif_queue_stopped(skge->netdev)
-           && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-               netif_wake_queue(skge->netdev);
+       if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+               netif_wake_queue(dev);
 
-       spin_unlock(&skge->tx_lock);
+       netif_tx_unlock(dev);
 }
 
 static int skge_poll(struct net_device *dev, int *budget)
@@ -2712,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget)
        int to_do = min(dev->quota, *budget);
        int work_done = 0;
 
+       skge_tx_done(dev);
+
+       skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
        for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
                struct skge_rx_desc *rd = e->desc;
                struct sk_buff *skb;
@@ -2722,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget)
                if (control & BMU_OWN)
                        break;
 
-               skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+               skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
                if (likely(skb)) {
                        dev->last_rx = jiffies;
                        netif_receive_skb(skb);
@@ -2742,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget)
        if (work_done >=  to_do)
                return 1; /* not done */
 
-       netif_rx_complete(dev);
-
        spin_lock_irq(&hw->hw_lock);
-       hw->intr_mask |= rxirqmask[skge->port];
+       __netif_rx_complete(dev);
+       hw->intr_mask |= irqmask[skge->port];
        skge_write32(hw, B0_IMSK, hw->intr_mask);
-       mmiowb();
+       skge_read32(hw, B0_IMSK);
        spin_unlock_irq(&hw->hw_lock);
 
        return 0;
@@ -2881,6 +2871,7 @@ static void skge_extirq(void *arg)
        spin_lock_irq(&hw->hw_lock);
        hw->intr_mask |= IS_EXT_REG;
        skge_write32(hw, B0_IMSK, hw->intr_mask);
+       skge_read32(hw, B0_IMSK);
        spin_unlock_irq(&hw->hw_lock);
 }
 
@@ -2888,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
        struct skge_hw *hw = dev_id;
        u32 status;
+       int handled = 0;
 
+       spin_lock(&hw->hw_lock);
        /* Reading this register masks IRQ */
        status = skge_read32(hw, B0_SP_ISRC);
-       if (status == 0)
-               return IRQ_NONE;
+       if (status == 0 || status == ~0)
+               goto out;
 
-       spin_lock(&hw->hw_lock);
+       handled = 1;
        status &= hw->intr_mask;
        if (status & IS_EXT_REG) {
                hw->intr_mask &= ~IS_EXT_REG;
                schedule_work(&hw->phy_work);
        }
 
-       if (status & IS_XA1_F) {
-               skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
-               skge_txirq(hw->dev[0]);
-       }
-
-       if (status & IS_R1_F) {
-               skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-               hw->intr_mask &= ~IS_R1_F;
+       if (status & (IS_XA1_F|IS_R1_F)) {
+               hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
                netif_rx_schedule(hw->dev[0]);
        }
 
@@ -2927,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
                skge_mac_intr(hw, 0);
 
        if (hw->dev[1]) {
-               if (status & IS_XA2_F) {
-                       skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
-                       skge_txirq(hw->dev[1]);
-               }
-
-               if (status & IS_R2_F) {
-                       skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-                       hw->intr_mask &= ~IS_R2_F;
+               if (status & (IS_XA2_F|IS_R2_F)) {
+                       hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
                        netif_rx_schedule(hw->dev[1]);
                }
 
@@ -2955,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
                skge_error_irq(hw);
 
        skge_write32(hw, B0_IMSK, hw->intr_mask);
+       skge_read32(hw, B0_IMSK);
+out:
        spin_unlock(&hw->hw_lock);
 
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(handled);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3106,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw)
        else
                hw->ram_size = t8 * 4096;
 
-       spin_lock_init(&hw->hw_lock);
        hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
        if (hw->ports > 1)
                hw->intr_mask |= IS_PORT_2;
@@ -3222,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
        dev->poll_controller = skge_netpoll;
 #endif
        dev->irq = hw->pdev->irq;
-       dev->features = NETIF_F_LLTX;
+
        if (highmem)
                dev->features |= NETIF_F_HIGHDMA;
 
@@ -3244,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
        skge->port = port;
 
-       spin_lock_init(&skge->tx_lock);
-
        if (hw->chip_id != CHIP_ID_GENESIS) {
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                skge->rx_csum = 1;
@@ -3332,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
        hw->pdev = pdev;
        mutex_init(&hw->phy_mutex);
        INIT_WORK(&hw->phy_work, skge_extirq, hw);
+       spin_lock_init(&hw->hw_lock);
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
        if (!hw->regs) {
@@ -3340,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                goto err_out_free_hw;
        }
 
-       err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
-       if (err) {
-               printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-                      pci_name(pdev), pdev->irq);
-               goto err_out_iounmap;
-       }
-       pci_set_drvdata(pdev, hw);
-
        err = skge_reset(hw);
        if (err)
-               goto err_out_free_irq;
+               goto err_out_iounmap;
 
        printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
               (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
               skge_board_name(hw), hw->chip_rev);
 
-       if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+       dev = skge_devinit(hw, 0, using_dac);
+       if (!dev)
                goto err_out_led_off;
 
        if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -3366,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                goto err_out_free_netdev;
        }
 
-
        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3374,6 +3347,12 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                goto err_out_free_netdev;
        }
 
+       err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+       if (err) {
+               printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+                      dev->name, pdev->irq);
+               goto err_out_unregister;
+       }
        skge_show_addr(dev);
 
        if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
@@ -3386,15 +3365,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                        free_netdev(dev1);
                }
        }
+       pci_set_drvdata(pdev, hw);
 
        return 0;
 
+err_out_unregister:
+       unregister_netdev(dev);
 err_out_free_netdev:
        free_netdev(dev);
 err_out_led_off:
        skge_write16(hw, B0_LED, LED_STAT_OFF);
-err_out_free_irq:
-       free_irq(pdev->irq, hw);
 err_out_iounmap:
        iounmap(hw->regs);
 err_out_free_hw:
@@ -3424,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
        spin_lock_irq(&hw->hw_lock);
        hw->intr_mask = 0;
        skge_write32(hw, B0_IMSK, 0);
+       skge_read32(hw, B0_IMSK);
        spin_unlock_irq(&hw->hw_lock);
 
        skge_write16(hw, B0_LED, LED_STAT_OFF);
@@ -3449,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
        struct skge_hw *hw  = pci_get_drvdata(pdev);
        int i, wol = 0;
 
-       for (i = 0; i < 2; i++) {
+       pci_save_state(pdev);
+       for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
 
-               if (dev) {
+               if (netif_running(dev)) {
                        struct skge_port *skge = netdev_priv(dev);
-                       if (netif_running(dev)) {
-                               netif_carrier_off(dev);
-                               if (skge->wol)
-                                       netif_stop_queue(dev);
-                               else
-                                       skge_down(dev);
-                       }
-                       netif_device_detach(dev);
+
+                       netif_carrier_off(dev);
+                       if (skge->wol)
+                               netif_stop_queue(dev);
+                       else
+                               skge_down(dev);
                        wol |= skge->wol;
                }
+               netif_device_detach(dev);
        }
 
-       pci_save_state(pdev);
+       skge_write32(hw, B0_IMSK, 0);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
-       pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
@@ -3477,23 +3457,33 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 static int skge_resume(struct pci_dev *pdev)
 {
        struct skge_hw *hw  = pci_get_drvdata(pdev);
-       int i;
+       int i, err;
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);
 
-       skge_reset(hw);
+       err = skge_reset(hw);
+       if (err)
+               goto out;
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < hw->ports; i++) {
                struct net_device *dev = hw->dev[i];
-               if (dev) {
-                       netif_device_attach(dev);
-                       if (netif_running(dev) && skge_up(dev))
+
+               netif_device_attach(dev);
+               if (netif_running(dev)) {
+                       err = skge_up(dev);
+
+                       if (err) {
+                               printk(KERN_ERR PFX "%s: could not up: %d\n",
+                                      dev->name, err);
                                dev_close(dev);
+                               goto out;
+                       }
                }
        }
-       return 0;
+out:
+       return err;
 }
 #endif
 
@@ -3510,7 +3500,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init_module(void)
 {
-       return pci_module_init(&skge_driver);
+       return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_cleanup_module(void)
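Taken together, the skge.c hunks above drop the driver-private tx_lock and the NETIF_F_LLTX flag in favour of the core netif_tx_lock, so transmit cleanup now serializes against hard_start_xmit through the stack's own lock. A hypothetical sketch of that locking pattern, with example_clean_ring standing in for the real skge_tx_clean():

#include <linux/netdevice.h>

/* Free transmit descriptors while excluding the transmit path. */
static void example_clean_ring(struct net_device *dev)
{
	netif_tx_lock_bh(dev);

	/* ... walk the ring and release completed buffers here ... */

	netif_wake_queue(dev);
	netif_tx_unlock_bh(dev);
}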
index 593387b..79e0927 100644 (file)
@@ -2417,7 +2417,6 @@ struct skge_port {
        struct net_device    *netdev;
        int                  port;
 
-       spinlock_t           tx_lock;
        struct skge_ring     tx_ring;
        struct skge_ring     rx_ring;
 
index 933e87f..7ce0663 100644 (file)
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME               "sky2"
-#define DRV_VERSION            "1.5"
+#define DRV_VERSION            "1.7"
 #define PFX                    DRV_NAME " "
 
 /*
@@ -106,6 +106,7 @@ static const struct pci_device_id sky2_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },    /* DGE-560T */
+       { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },    /* DGE-550SX */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -117,10 +118,17 @@ static const struct pci_device_id sky2_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
        { 0 }
 };
 
@@ -190,7 +198,6 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 {
        u16 power_control;
-       u32 reg1;
        int vaux;
 
        pr_debug("sky2_set_power_state %d\n", state);
@@ -223,20 +230,9 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
                else
                        sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 
-               /* Turn off phy power saving */
-               reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
-               reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-
-               /* looks like this XL is back asswards .. */
-               if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
-                       reg1 |= PCI_Y2_PHY1_COMA;
-                       if (hw->ports > 1)
-                               reg1 |= PCI_Y2_PHY2_COMA;
-               }
-               sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
-               udelay(100);
-
                if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+                       u32 reg1;
+
                        sky2_pci_write32(hw, PCI_DEV_REG3, 0);
                        reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
                        reg1 &= P_ASPM_CONTROL_MSK;
@@ -248,15 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 
        case PCI_D3hot:
        case PCI_D3cold:
-               /* Turn on phy power saving */
-               reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
-               if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
-                       reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-               else
-                       reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-               sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
-               udelay(100);
-
                if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
                        sky2_write8(hw, B2_Y2_CLK_GATE, 0);
                else
@@ -280,7 +267,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 }
 
-static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
 {
        u16 reg;
 
@@ -528,6 +515,29 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
 }
 
+static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
+{
+       u32 reg1;
+       static const u32 phy_power[]
+               = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+
+       /* looks like this XL is back asswards .. */
+       if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+               onoff = !onoff;
+
+       reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+
+       if (onoff)
+               /* Turn off phy power saving */
+               reg1 &= ~phy_power[port];
+       else
+               reg1 |= phy_power[port];
+
+       sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+       sky2_pci_read32(hw, PCI_DEV_REG1);
+       udelay(100);
+}
+
 /* Force a renegotiation */
 static void sky2_phy_reinit(struct sky2_port *sky2)
 {
@@ -760,9 +770,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
+       q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
        wmb();
-       sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-       mmiowb();
+       sky2_write16(hw, q, idx);
+       sky2_read16(hw, q);
 }
 
 
@@ -949,14 +960,16 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 /*
  * It appears the hardware has a bug in the FIFO logic that
 * causes it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned. ALso alloc_skb() won't align properly if slab
- * debugging is enabled.
+ * is not 64-byte aligned. The buffer returned from netdev_alloc_skb is
+ * aligned except if slab debugging is enabled.
  */
-static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev,
+                                            unsigned int length,
+                                            gfp_t gfp_mask)
 {
        struct sk_buff *skb;
 
-       skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+       skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask);
        if (likely(skb)) {
                unsigned long p = (unsigned long) skb->data;
                skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
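
The skb_reserve()/ALIGN() pair above rounds skb->data up to the next RX_SKB_ALIGN boundary. A standalone illustration of the arithmetic follows; RX_SKB_ALIGN is assumed to be 8 here (the actual value lives in sky2.h), and the address is made up.

#include <stdio.h>

#define RX_SKB_ALIGN	8UL				/* assumed value */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long p = 0x1003;		/* hypothetical skb->data address */
	unsigned long pad = ALIGN(p, RX_SKB_ALIGN) - p;

	/* pad == 5: reserving 5 bytes moves the data pointer to 0x1008,
	 * which is 8-byte aligned, so the FIFO erratum is avoided. */
	printf("reserve %lu bytes -> data at %#lx\n", pad, p + pad);
	return 0;
}
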
@@ -992,7 +1005,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
        for (i = 0; i < sky2->rx_pending; i++) {
                struct ring_info *re = sky2->rx_ring + i;
 
-               re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
+               re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
+                                        GFP_KERNEL);
                if (!re->skb)
                        goto nomem;
 
@@ -1080,6 +1094,8 @@ static int sky2_up(struct net_device *dev)
        if (!sky2->rx_ring)
                goto err_out;
 
+       sky2_phy_power(hw, port, 1);
+
        sky2_mac_init(hw, port);
 
        /* Determine available ram buffer space (in 4K blocks).
@@ -1184,7 +1200,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        struct sky2_tx_le *le = NULL;
        struct tx_ring_info *re;
        unsigned i, len;
-       int avail;
        dma_addr_t mapping;
        u32 addr64;
        u16 mss;
@@ -1234,25 +1249,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        /* Check for TCP Segmentation Offload */
        mss = skb_shinfo(skb)->gso_size;
        if (mss != 0) {
-               /* just drop the packet if non-linear expansion fails */
-               if (skb_header_cloned(skb) &&
-                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-                       dev_kfree_skb(skb);
-                       goto out_unlock;
-               }
-
                mss += ((skb->h.th->doff - 5) * 4);     /* TCP options */
                mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
                mss += ETH_HLEN;
-       }
 
-       if (mss != sky2->tx_last_mss) {
-               le = get_tx_le(sky2);
-               le->tx.tso.size = cpu_to_le16(mss);
-               le->tx.tso.rsvd = 0;
-               le->opcode = OP_LRGLEN | HW_OWNER;
-               le->ctrl = 0;
-               sky2->tx_last_mss = mss;
+               if (mss != sky2->tx_last_mss) {
+                       le = get_tx_le(sky2);
+                       le->tx.tso.size = cpu_to_le16(mss);
+                       le->tx.tso.rsvd = 0;
+                       le->opcode = OP_LRGLEN | HW_OWNER;
+                       le->ctrl = 0;
+                       sky2->tx_last_mss = mss;
+               }
        }
 
        ctrl = 0;
@@ -1280,12 +1288,17 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                if (skb->nh.iph->protocol == IPPROTO_UDP)
                        ctrl |= UDPTCP;
 
-               le = get_tx_le(sky2);
-               le->tx.csum.start = cpu_to_le16(hdr);
-               le->tx.csum.offset = cpu_to_le16(offset);
-               le->length = 0; /* initial checksum value */
-               le->ctrl = 1;   /* one packet */
-               le->opcode = OP_TCPLISW | HW_OWNER;
+               if (hdr != sky2->tx_csum_start || offset != sky2->tx_csum_offset) {
+                       sky2->tx_csum_start = hdr;
+                       sky2->tx_csum_offset = offset;
+
+                       le = get_tx_le(sky2);
+                       le->tx.csum.start = cpu_to_le16(hdr);
+                       le->tx.csum.offset = cpu_to_le16(offset);
+                       le->length = 0; /* initial checksum value */
+                       le->ctrl = 1;   /* one packet */
+                       le->opcode = OP_TCPLISW | HW_OWNER;
+               }
        }
 
        le = get_tx_le(sky2);
@@ -1320,23 +1333,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                le->opcode = OP_BUFFER | HW_OWNER;
 
                fre = sky2->tx_ring
-                   + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
+                       + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
                pci_unmap_addr_set(fre, mapaddr, mapping);
        }
 
        re->idx = sky2->tx_prod;
        le->ctrl |= EOP;
 
-       avail = tx_avail(sky2);
-       if (mss != 0 || avail < TX_MIN_PENDING) {
-               le->ctrl |= FRC_STAT;
-               if (avail <= MAX_SKB_TX_LE)
-                       netif_stop_queue(dev);
-       }
+       if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+               netif_stop_queue(dev);
 
        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
-out_unlock:
        spin_unlock(&sky2->tx_lock);
 
        dev->trans_start = jiffies;
@@ -1421,7 +1429,7 @@ static int sky2_down(struct net_device *dev)
        /* Stop more packets from being queued */
        netif_stop_queue(dev);
 
-       sky2_phy_reset(hw, port);
+       sky2_gmac_reset(hw, port);
 
        /* Stop transmitter */
        sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
@@ -1469,6 +1477,8 @@ static int sky2_down(struct net_device *dev)
        imask &= ~portirq_msk[port];
        sky2_write32(hw, B0_IMSK, imask);
 
+       sky2_phy_power(hw, port, 0);
+
        /* turn off LED's */
        sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
 
@@ -1832,15 +1842,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
  * For small packets or errors, just reuse existing skb.
  * For larger packets, get new buffer.
  */
-static struct sk_buff *sky2_receive(struct sky2_port *sky2,
+static struct sk_buff *sky2_receive(struct net_device *dev,
                                    u16 length, u32 status)
 {
+       struct sky2_port *sky2 = netdev_priv(dev);
        struct ring_info *re = sky2->rx_ring + sky2->rx_next;
        struct sk_buff *skb = NULL;
 
        if (unlikely(netif_msg_rx_status(sky2)))
                printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
-                      sky2->netdev->name, sky2->rx_next, status, length);
+                      dev->name, sky2->rx_next, status, length);
 
        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
        prefetch(sky2->rx_ring + sky2->rx_next);
@@ -1851,11 +1862,11 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
        if (!(status & GMR_FS_RX_OK))
                goto resubmit;
 
-       if (length > sky2->netdev->mtu + ETH_HLEN)
+       if (length > dev->mtu + ETH_HLEN)
                goto oversize;
 
        if (length < copybreak) {
-               skb = alloc_skb(length + 2, GFP_ATOMIC);
+               skb = netdev_alloc_skb(dev, length + 2);
                if (!skb)
                        goto resubmit;
 
@@ -1870,7 +1881,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
        } else {
                struct sk_buff *nskb;
 
-               nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
+               nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC);
                if (!nskb)
                        goto resubmit;
 
@@ -1900,7 +1911,7 @@ error:
 
        if (netif_msg_rx_err(sky2) && net_ratelimit())
                printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
-                      sky2->netdev->name, status, length);
+                      dev->name, status, length);
 
        if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
                sky2->net_stats.rx_length_errors++;
@@ -1926,12 +1937,6 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
        }
 }
 
-/* Is status ring empty or is there more to do? */
-static inline int sky2_more_work(const struct sky2_hw *hw)
-{
-       return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
-}
-
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {
@@ -1960,11 +1965,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 
                switch (le->opcode & ~HW_OWNER) {
                case OP_RXSTAT:
-                       skb = sky2_receive(sky2, length, status);
+                       skb = sky2_receive(dev, length, status);
                        if (!skb)
                                break;
 
-                       skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        dev->last_rx = jiffies;
 
@@ -2022,6 +2026,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
                }
        }
 
+       /* Fully processed status ring so clear irq */
+       sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
 exit_loop:
        if (buf_write[0]) {
                sky2 = netdev_priv(hw->dev[0]);
@@ -2231,19 +2238,16 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
        work_done = sky2_status_intr(hw, work_limit);
-       *budget -= work_done;
-       dev0->quota -= work_done;
+       if (work_done < work_limit) {
+               netif_rx_complete(dev0);
 
-       if (status & Y2_IS_STAT_BMU)
-               sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
-       if (sky2_more_work(hw))
+               sky2_read32(hw, B0_Y2_SP_LISR);
+               return 0;
+       } else {
+               *budget -= work_done;
+               dev0->quota -= work_done;
                return 1;
-
-       netif_rx_complete(dev0);
-
-       sky2_read32(hw, B0_Y2_SP_LISR);
-       return 0;
+       }
 }
 
 static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
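
The rework of sky2_poll() above follows the usual 2.6.18-era ->poll() contract: process up to the budget, and only call netif_rx_complete() and re-enable interrupts once the status ring is fully drained. A generic sketch of that contract, with hypothetical helpers standing in for the driver's receive and interrupt-enable code:

#include <linux/netdevice.h>

static int example_rx_process(struct net_device *dev, int limit);	/* hypothetical */
static void example_enable_irq(struct net_device *dev);		/* hypothetical */

static int example_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(dev->quota, *budget);
	int work_done = example_rx_process(dev, work_limit);

	if (work_done < work_limit) {
		/* Ring fully drained: leave polling mode and let the
		 * device raise interrupts again. */
		netif_rx_complete(dev);
		example_enable_irq(dev);
		return 0;
	}

	/* More work pending: account for it and stay in polled mode. */
	*budget -= work_done;
	dev->quota -= work_done;
	return 1;
}
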
@@ -2409,7 +2413,7 @@ static int sky2_reset(struct sky2_hw *hw)
        sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
 
        for (i = 0; i < hw->ports; i++)
-               sky2_phy_reset(hw, i);
+               sky2_gmac_reset(hw, i);
 
        memset(hw->st_le, 0, STATUS_LE_BYTES);
        hw->st_idx = 0;
@@ -3200,6 +3204,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
        struct pci_dev *pdev = hw->pdev;
        int err;
 
+       init_waitqueue_head (&hw->msi_wait);
+
        sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
 
        err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
@@ -3209,10 +3215,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
                return err;
        }
 
-       init_waitqueue_head (&hw->msi_wait);
-
        sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
-       wmb();
+       sky2_read8(hw, B0_CTST);
 
        wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
 
index 2db8d19..fa8af9f 100644 (file)
@@ -1748,7 +1748,6 @@ enum {
        INIT_SUM= 1<<3,
        LOCK_SUM= 1<<4,
        INS_VLAN= 1<<5,
-       FRC_STAT= 1<<6,
        EOP     = 1<<7,
 };
 
@@ -1844,6 +1843,8 @@ struct sky2_port {
        u32                  tx_addr64;
        u16                  tx_pending;
        u16                  tx_last_mss;
+       u16                  tx_csum_start;
+       u16                  tx_csum_offset;
 
        struct ring_info     *rx_ring ____cacheline_aligned_in_smp;
        struct sky2_rx_le    *rx_le;
index 3a1b713..9a540e2 100644 (file)
@@ -94,27 +94,23 @@ slhc_init(int rslots, int tslots)
        register struct cstate *ts;
        struct slcompress *comp;
 
-       comp = (struct slcompress *)kmalloc(sizeof(struct slcompress),
-                                           GFP_KERNEL);
+       comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
        if (! comp)
                goto out_fail;
-       memset(comp, 0, sizeof(struct slcompress));
 
        if ( rslots > 0  &&  rslots < 256 ) {
                size_t rsize = rslots * sizeof(struct cstate);
-               comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL);
+               comp->rstate = kzalloc(rsize, GFP_KERNEL);
                if (! comp->rstate)
                        goto out_free;
-               memset(comp->rstate, 0, rsize);
                comp->rslot_limit = rslots - 1;
        }
 
        if ( tslots > 0  &&  tslots < 256 ) {
                size_t tsize = tslots * sizeof(struct cstate);
-               comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL);
+               comp->tstate = kzalloc(tsize, GFP_KERNEL);
                if (! comp->tstate)
                        goto out_free2;
-               memset(comp->tstate, 0, tsize);
                comp->tslot_limit = tslots - 1;
        }
 
@@ -141,9 +137,9 @@ slhc_init(int rslots, int tslots)
        return comp;
 
 out_free2:
-       kfree((unsigned char *)comp->rstate);
+       kfree(comp->rstate);
 out_free:
-       kfree((unsigned char *)comp);
+       kfree(comp);
 out_fail:
        return NULL;
 }
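
The slhc_init() hunk above applies the standard kzalloc() conversion: one call replaces kmalloc() plus memset(), and the casts on void pointers become unnecessary. A minimal sketch of the pattern, using a placeholder structure:

#include <linux/slab.h>

struct foo {				/* placeholder structure */
	int a;
	char name[16];
};

static struct foo *foo_alloc(void)
{
	/* kzalloc() is kmalloc() followed by zeroing, so the explicit
	 * memset() and the (struct foo *) cast are no longer needed. */
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}
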
@@ -700,20 +696,6 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
-#ifdef MODULE
-
-int init_module(void)
-{
-       printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n");
-       return 0;
-}
-
-void cleanup_module(void)
-{
-       return;
-}
-
-#endif /* MODULE */
 #else /* CONFIG_INET */
 
 
index 0b15290..4438fe8 100644 (file)
@@ -55,8 +55,6 @@ static const char version[] =
                         )
 #endif
 
-
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
index c0a62b0..8e1f620 100644 (file)
@@ -2053,7 +2053,7 @@ static int __init starfire_init (void)
                return -ENODEV;
        }
 
-       return pci_module_init (&starfire_driver);
+       return pci_register_driver(&starfire_driver);
 }
 
 
index 2dcadb1..0d76e22 100644 (file)
@@ -914,7 +914,7 @@ static void set_multicast_list( struct net_device *dev )
 
        if (dev->flags & IFF_PROMISC) {
                /* Log any net taps. */
-               DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+               DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
                REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
        } else {
                short multicast_table[4];
index 698568e..a3a7a35 100644 (file)
        Support and updates available at
        http://www.scyld.com/network/sundance.html
        [link no longer provides useful info -jgarzik]
+       Archives of the mailing list are still available at
+       http://www.beowulf.org/pipermail/netdrivers/
 
 */
 
 #define DRV_NAME       "sundance"
-#define DRV_VERSION    "1.1"
-#define DRV_RELDATE    "27-Jun-2006"
+#define DRV_VERSION    "1.2"
+#define DRV_RELDATE    "11-Sep-2006"
 
 
 /* The user-configurable values.
@@ -646,7 +648,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
        /* Reset the chip to erase previous misconfiguration. */
        if (netif_msg_hw(np))
                printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
-       iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
+       sundance_reset(dev, 0x00ff << 16);
        if (netif_msg_hw(np))
                printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 
@@ -1075,13 +1077,8 @@ reset_tx (struct net_device *dev)
        
        /* Reset tx logic, TxListPtr will be cleaned */
        iowrite16 (TxDisable, ioaddr + MACCtrl1);
-       iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
-                       ioaddr + ASICCtrl + 2);
-       for (i=50; i > 0; i--) {
-               if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
-                       break;
-               mdelay(1);
-       }
+       sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
        /* free all tx skbuff */
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = np->tx_skbuff[i];
@@ -1467,8 +1464,6 @@ static void set_rx_mode(struct net_device *dev)
        int i;
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
        } else if ((dev->mc_count > multicast_filter_limit)
@@ -1736,7 +1731,7 @@ static int __init sundance_init(void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init(&sundance_driver);
+       return pci_register_driver(&sundance_driver);
 }
 
 static void __exit sundance_exit(void)
index b70bbd7..1a441a8 100644 (file)
@@ -3194,7 +3194,7 @@ static struct pci_driver gem_driver = {
 
 static int __init gem_init(void)
 {
-       return pci_module_init(&gem_driver);
+       return pci_register_driver(&gem_driver);
 }
 
 static void __exit gem_cleanup(void)
index 8b53ded..39460fa 100644 (file)
@@ -1725,7 +1725,7 @@ static struct pci_driver tc35815_driver = {
 
 static int __init tc35815_init_module(void)
 {
-       return pci_module_init(&tc35815_driver);
+       return pci_register_driver(&tc35815_driver);
 }
 
 static void __exit tc35815_cleanup_module(void)
index eafabb2..d6e2a68 100644 (file)
@@ -11819,7 +11819,7 @@ static struct pci_driver tg3_driver = {
 
 static int __init tg3_init(void)
 {
-       return pci_module_init(&tg3_driver);
+       return pci_register_driver(&tg3_driver);
 }
 
 static void __exit tg3_cleanup(void)
index 465921e..412390b 100644 (file)
@@ -1815,7 +1815,7 @@ static struct pci_driver xl_3c359_driver = {
 
 static int __init xl_pci_init (void)
 {
-       return pci_module_init (&xl_3c359_driver);
+       return pci_register_driver(&xl_3c359_driver);
 }
 
 
index 28d968f..0d66700 100644 (file)
@@ -1998,7 +1998,7 @@ static struct pci_driver streamer_pci_driver = {
 };
 
 static int __init streamer_init_module(void) {
-  return pci_module_init(&streamer_pci_driver);
+  return pci_register_driver(&streamer_pci_driver);
 }
 
 static void __exit streamer_cleanup_module(void) {
index 683f14b..fa3a2bb 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/21142.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
@@ -26,9 +26,9 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
 
 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
    of available transceivers.  */
-void t21142_timer(unsigned long data)
+void t21142_media_task(void *data)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = data;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr12 = ioread32(ioaddr + CSR12);
index d05c5aa..17a2eba 100644 (file)
@@ -2138,17 +2138,21 @@ static int de_resume (struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata (pdev);
        struct de_private *de = dev->priv;
+       int retval = 0;
 
        rtnl_lock();
        if (netif_device_present(dev))
                goto out;
-       if (netif_running(dev)) {
-               pci_enable_device(pdev);
-               de_init_hw(de);
-               netif_device_attach(dev);
-       } else {
-               netif_device_attach(dev);
+       if (!netif_running(dev))
+               goto out_attach;
+       if ((retval = pci_enable_device(pdev))) {
+               printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
+                       dev->name);
+               goto out;
        }
+       de_init_hw(de);
+out_attach:
+       netif_device_attach(dev);
 out:
        rtnl_unlock();
        return 0;
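
The de_resume() change is one instance of a fix repeated in several drivers in this merge: the return value of pci_enable_device() is checked instead of being ignored. A generic sketch of the pattern, with a hypothetical hardware-init helper; the rtnl locking and netif_device_present() check done by the real resume paths are omitted here.

#include <linux/pci.h>
#include <linux/netdevice.h>

static void example_init_hw(struct net_device *dev);	/* hypothetical */

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int retval;

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	retval = pci_enable_device(pdev);
	if (retval) {
		printk(KERN_ERR "%s: pci_enable_device failed in resume\n",
		       dev->name);
		return retval;
	}

	example_init_hw(dev);
	netif_device_attach(dev);
	return 0;
}
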
@@ -2172,7 +2176,7 @@ static int __init de_init (void)
 #ifdef MODULE
        printk("%s", version);
 #endif
-       return pci_module_init (&de_driver);
+       return pci_register_driver(&de_driver);
 }
 
 static void __exit de_exit (void)
index 75ff14a..e661d0a 100644 (file)
@@ -5754,7 +5754,7 @@ static int __init de4x5_module_init (void)
        int err = 0;
 
 #ifdef CONFIG_PCI
-       err = pci_module_init (&de4x5_pci_driver);
+       err = pci_register_driver(&de4x5_pci_driver);
 #endif
 #ifdef CONFIG_EISA
        err |= eisa_driver_register (&de4x5_eisa_driver);
index 4e5b0f2..66dade5 100644 (file)
@@ -2039,7 +2039,7 @@ static int __init dmfe_init_module(void)
        if (HPNA_NoiseFloor > 15)
                HPNA_NoiseFloor = 0;
 
-       rc = pci_module_init(&dmfe_driver);
+       rc = pci_register_driver(&dmfe_driver);
        if (rc < 0)
                return rc;
 
index 5ffbd5b..206918b 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/eeprom.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
index 99ccf2e..7f8f5d4 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/interrupt.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
index e9bc2a9..20bd52b 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/media.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
index ca7e532..85a521e 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/pnic.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
index ab98502..c31be0e 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/pnic2.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
         Modified to help support PNIC_II by Kevin B. Hendricks
index e058a9f..066e5d6 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/timer.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
 #include "tulip.h"
 
 
-void tulip_timer(unsigned long data)
+void tulip_media_task(void *data)
 {
-       struct net_device *dev = (struct net_device *)data;
+       struct net_device *dev = data;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        u32 csr12 = ioread32(ioaddr + CSR12);
        int next_tick = 2*HZ;
+       unsigned long flags;
 
        if (tulip_debug > 2) {
                printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
@@ -126,6 +127,15 @@ void tulip_timer(unsigned long data)
        }
        break;
        }
+
+
+       spin_lock_irqsave(&tp->lock, flags);
+       if (tp->timeout_recovery) {
+               tulip_tx_timeout_complete(tp, ioaddr);
+               tp->timeout_recovery = 0;
+       }
+       spin_unlock_irqrestore(&tp->lock, flags);
+
        /* mod_timer synchronizes us with potential add_timer calls
         * from interrupts.
         */
index 3bcfbf3..25668dd 100644 (file)
 /* undefine, or define to various debugging levels (>4 == obscene levels) */
 #define TULIP_DEBUG 1
 
-/* undefine USE_IO_OPS for MMIO, define for PIO */
 #ifdef CONFIG_TULIP_MMIO
-# undef USE_IO_OPS
+#define TULIP_BAR      1       /* CBMA */
 #else
-# define USE_IO_OPS 1
+#define TULIP_BAR      0       /* CBIO */
 #endif
 
 
@@ -44,7 +43,8 @@ struct tulip_chip_table {
        int io_size;
        int valid_intrs;        /* CSR7 interrupt enable settings */
        int flags;
-       void (*media_timer) (unsigned long data);
+       void (*media_timer) (unsigned long);
+       void (*media_task) (void *);
 };
 
 
@@ -142,6 +142,7 @@ enum status_bits {
        RxNoBuf = 0x80,
        RxIntr = 0x40,
        TxFIFOUnderflow = 0x20,
+       RxErrIntr = 0x10,
        TxJabber = 0x08,
        TxNoBuf = 0x04,
        TxDied = 0x02,
@@ -192,9 +193,14 @@ struct tulip_tx_desc {
 
 
 enum desc_status_bits {
-       DescOwned = 0x80000000,
-       RxDescFatalErr = 0x8000,
-       RxWholePkt = 0x0300,
+       DescOwned    = 0x80000000,
+       DescWholePkt = 0x60000000,
+       DescEndPkt   = 0x40000000,
+       DescStartPkt = 0x20000000,
+       DescEndRing  = 0x02000000,
+       DescUseLink  = 0x01000000,
+       RxDescFatalErr = 0x008000,
+       RxWholePkt   = 0x00000300,
 };
 
 
@@ -366,6 +372,7 @@ struct tulip_private {
        unsigned int medialock:1;       /* Don't sense media type. */
        unsigned int mediasense:1;      /* Media sensing in progress. */
        unsigned int nway:1, nwayset:1;         /* 21143 internal NWay. */
+       unsigned int timeout_recovery:1;
        unsigned int csr0;      /* CSR0 setting. */
        unsigned int csr6;      /* Current CSR6 control settings. */
        unsigned char eeprom[EEPROM_SIZE];      /* Serial EEPROM contents. */
@@ -384,6 +391,7 @@ struct tulip_private {
        void __iomem *base_addr;
        int csr12_shadow;
        int pad0;               /* Used for 8-byte alignment */
+       struct work_struct media_work;
 };
 
 
@@ -398,7 +406,7 @@ struct eeprom_fixup {
 
 /* 21142.c */
 extern u16 t21142_csr14[];
-void t21142_timer(unsigned long data);
+void t21142_media_task(void *data);
 void t21142_start_nway(struct net_device *dev);
 void t21142_lnk_change(struct net_device *dev, int csr5);
 
@@ -436,7 +444,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
 void pnic_timer(unsigned long data);
 
 /* timer.c */
-void tulip_timer(unsigned long data);
+void tulip_media_task(void *data);
 void mxic_timer(unsigned long data);
 void comet_timer(unsigned long data);
 
@@ -485,4 +493,14 @@ static inline void tulip_restart_rxtx(struct tulip_private *tp)
        tulip_start_rxtx(tp);
 }
 
+static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
+{
+       /* Stop and restart the chip's Tx processes. */
+       tulip_restart_rxtx(tp);
+       /* Trigger an immediate transmit demand. */
+       iowrite32(0, ioaddr + CSR1);
+
+       tp->stats.tx_errors++;
+}
+
 #endif /* __NET_TULIP_H__ */
index 7351831..2034baf 100644 (file)
@@ -1,7 +1,7 @@
 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
 
 /*
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
@@ -17,9 +17,9 @@
 
 #define DRV_NAME       "tulip"
 #ifdef CONFIG_TULIP_NAPI
-#define DRV_VERSION    "1.1.13-NAPI" /* Keep at least for test */
+#define DRV_VERSION    "1.1.14-NAPI" /* Keep at least for test */
 #else
-#define DRV_VERSION    "1.1.13"
+#define DRV_VERSION    "1.1.14"
 #endif
 #define DRV_RELDATE    "May 11, 2002"
 
@@ -130,7 +130,14 @@ int tulip_debug = TULIP_DEBUG;
 int tulip_debug = 1;
 #endif
 
+static void tulip_timer(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *)data;
+       struct tulip_private *tp = netdev_priv(dev);
 
+       if (netif_running(dev))
+               schedule_work(&tp->media_work);
+}
 
 /*
  * This table use during operation for capabilities and media timer.
@@ -144,59 +151,60 @@ struct tulip_chip_table tulip_tbl[] = {
 
   /* DC21140 */
   { "Digital DS21140 Tulip", 128, 0x0001ebef,
-       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },
+       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
+       tulip_media_task },
 
   /* DC21142, DC21143 */
-  { "Digital DS21143 Tulip", 128, 0x0801fbff,
+  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
        HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
-       | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },
+       | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
 
   /* LC82C168 */
   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
-       HAS_MII | HAS_PNICNWAY, pnic_timer },
+       HAS_MII | HAS_PNICNWAY, pnic_timer, },
 
   /* MX98713 */
   { "Macronix 98713 PMAC", 128, 0x0001ebef,
-       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 
   /* MX98715 */
   { "Macronix 98715 PMAC", 256, 0x0001ebef,
-       HAS_MEDIA_TABLE, mxic_timer },
+       HAS_MEDIA_TABLE, mxic_timer, },
 
   /* MX98725 */
   { "Macronix 98725 PMAC", 256, 0x0001ebef,
-       HAS_MEDIA_TABLE, mxic_timer },
+       HAS_MEDIA_TABLE, mxic_timer, },
 
   /* AX88140 */
   { "ASIX AX88140", 128, 0x0001fbff,
        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
-       | IS_ASIX, tulip_timer },
+       | IS_ASIX, tulip_timer, tulip_media_task },
 
   /* PNIC2 */
   { "Lite-On PNIC-II", 256, 0x0801fbff,
-       HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },
+       HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
 
   /* COMET */
   { "ADMtek Comet", 256, 0x0001abef,
-       HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+       HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
 
   /* COMPEX9881 */
   { "Compex 9881 PMAC", 128, 0x0001ebef,
-       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+       HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
 
   /* I21145 */
   { "Intel DS21145 Tulip", 128, 0x0801fbff,
        HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
-       | HAS_NWAY | HAS_PCI_MWI, t21142_timer },
+       | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
 
   /* DM910X */
   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
-       tulip_timer },
+       tulip_timer, tulip_media_task },
 
   /* RS7112 */
   { "Conexant LANfinity", 256, 0x0001ebef,
-       HAS_MII | HAS_ACPI, tulip_timer },
+       HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
 
 };
 
@@ -295,12 +303,14 @@ static void tulip_up(struct net_device *dev)
 
        /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
        iowrite32(0x00000001, ioaddr + CSR0);
+       pci_read_config_dword(tp->pdev, PCI_COMMAND, &i);  /* flush write */
        udelay(100);
 
        /* Deassert reset.
           Wait the specified 50 PCI cycles after a reset by initializing
           Tx and Rx queues and the address filter list. */
        iowrite32(tp->csr0, ioaddr + CSR0);
+       pci_read_config_dword(tp->pdev, PCI_COMMAND, &i);  /* flush write */
        udelay(100);
 
        if (tulip_debug > 1)
@@ -522,20 +532,9 @@ static void tulip_tx_timeout(struct net_device *dev)
                           "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
                           dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
                           ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
-               if ( ! tp->medialock  &&  tp->mtable) {
-                       do
-                               --tp->cur_index;
-                       while (tp->cur_index >= 0
-                                  && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
-                                          & MediaIsFD));
-                       if (--tp->cur_index < 0) {
-                               /* We start again, but should instead look for default. */
-                               tp->cur_index = tp->mtable->leafcount - 1;
-                       }
-                       tulip_select_media(dev, 0);
-                       printk(KERN_WARNING "%s: transmit timed out, switching to %s "
-                                  "media.\n", dev->name, medianame[dev->if_port]);
-               }
+               tp->timeout_recovery = 1;
+               schedule_work(&tp->media_work);
+               goto out_unlock;
        } else if (tp->chip_id == PNIC2) {
                printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
                       "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
@@ -575,14 +574,9 @@ static void tulip_tx_timeout(struct net_device *dev)
        }
 #endif
 
-       /* Stop and restart the chip's Tx processes . */
-
-       tulip_restart_rxtx(tp);
-       /* Trigger an immediate transmit demand. */
-       iowrite32(0, ioaddr + CSR1);
-
-       tp->stats.tx_errors++;
+       tulip_tx_timeout_complete(tp, ioaddr);
 
+out_unlock:
        spin_unlock_irqrestore (&tp->lock, flags);
        dev->trans_start = jiffies;
        netif_wake_queue (dev);
@@ -732,6 +726,8 @@ static void tulip_down (struct net_device *dev)
        void __iomem *ioaddr = tp->base_addr;
        unsigned long flags;
 
+       flush_scheduled_work();
+
        del_timer_sync (&tp->timer);
 #ifdef CONFIG_TULIP_NAPI
        del_timer_sync (&tp->oom_timer);
@@ -1023,8 +1019,6 @@ static void set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
                csr6 |= AcceptAllMulticast | AcceptAllPhys;
-               /* Unconditionally log net taps. */
-               printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
        } else if ((dev->mc_count > 1000)  ||  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter well -- accept all multicasts. */
                tp->csr6 |= AcceptAllMulticast;
@@ -1361,11 +1355,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        if (pci_request_regions (pdev, "tulip"))
                goto err_out_free_netdev;
 
-#ifndef USE_IO_OPS
-       ioaddr =  pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
-#else
-       ioaddr =  pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
-#endif
+       ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+
        if (!ioaddr)
                goto err_out_free_res;
 
@@ -1398,6 +1389,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        tp->timer.data = (unsigned long)dev;
        tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
 
+       INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+
        dev->base_addr = (unsigned long)ioaddr;
 
 #ifdef CONFIG_TULIP_MWI
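
The tulip media code moves from running directly in timer context to a work queue: the timer callback only calls schedule_work(), and the media task then runs in process context. A minimal sketch of the wiring, using the pre-2.6.20 three-argument INIT_WORK() seen in the hunk above; all names are placeholders, not the tulip ones.

#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_priv {
	struct timer_list	timer;
	struct work_struct	media_work;
};

static void example_media_task(void *data)
{
	struct net_device *dev = data;
	/* Runs in process context off the shared workqueue:
	 * free to sleep, poll MII registers, etc. */
}

static void example_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct example_priv *priv = netdev_priv(dev);

	/* Timer (softirq) context must not sleep, so defer the real work. */
	if (netif_running(dev))
		schedule_work(&priv->media_work);
}

static void example_setup(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	init_timer(&priv->timer);
	priv->timer.data     = (unsigned long)dev;
	priv->timer.function = example_timer;

	/* Pre-2.6.20 three-argument form: function plus opaque data. */
	INIT_WORK(&priv->media_work, example_media_task, dev);
}
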
@@ -1644,8 +1637,14 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        if (register_netdev(dev))
                goto err_out_free_ring;
 
-       printk(KERN_INFO "%s: %s rev %d at %p,",
-              dev->name, chip_name, chip_rev, ioaddr);
+       printk(KERN_INFO "%s: %s rev %d at "
+#ifdef CONFIG_TULIP_MMIO
+               "MMIO"
+#else
+               "Port"
+#endif
+               " %#llx,", dev->name, chip_name, chip_rev,
+               (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
        pci_set_drvdata(pdev, dev);
 
        if (eeprom_missing)
@@ -1768,7 +1767,10 @@ static int tulip_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
-       pci_enable_device(pdev);
+       if ((retval = pci_enable_device(pdev))) {
+               printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+               return retval;
+       }
 
        if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
                printk (KERN_ERR "tulip: request_irq failed in resume\n");
@@ -1849,7 +1851,7 @@ static int __init tulip_init (void)
        tulip_max_interrupt_work = max_interrupt_work;
 
        /* probe for and init boards */
-       return pci_module_init (&tulip_driver);
+       return pci_register_driver(&tulip_driver);
 }
 
 
index fd64b2b..c4c720e 100644 (file)
@@ -1702,7 +1702,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
 
 static int __init uli526x_init_module(void)
 {
-       int rc;
 
        printk(version);
        printed_version = 1;
@@ -1714,22 +1713,19 @@ static int __init uli526x_init_module(void)
        if (cr6set)
                uli526x_cr6_user_set = cr6set;
 
-       switch(mode) {
+       switch (mode) {
        case ULI526X_10MHF:
        case ULI526X_100MHF:
        case ULI526X_10MFD:
        case ULI526X_100MFD:
                uli526x_media_mode = mode;
                break;
-       default:uli526x_media_mode = ULI526X_AUTO;
+       default:
+               uli526x_media_mode = ULI526X_AUTO;
                break;
        }
 
-       rc = pci_module_init(&uli526x_driver);
-       if (rc < 0)
-               return rc;
-
-       return 0;
+       return pci_register_driver(&uli526x_driver);
 }
 
 
index eba9083..0e5344f 100644 (file)
@@ -45,8 +45,8 @@
 */
 
 #define DRV_NAME       "winbond-840"
-#define DRV_VERSION    "1.01-d"
-#define DRV_RELDATE    "Nov-17-2001"
+#define DRV_VERSION    "1.01-e"
+#define DRV_RELDATE    "Sep-11-2006"
 
 
 /* Automatically extracted configuration info:
@@ -90,10 +90,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
    Making the Tx ring too large decreases the effectiveness of channel
    bonding and packet priority.
    There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE   16
 #define TX_QUEUE_LEN   10              /* Limit ring entries actually used.  */
 #define TX_QUEUE_LEN_RESTART   5
-#define RX_RING_SIZE   32
 
 #define TX_BUFLIMIT    (1024-128)
 
@@ -137,6 +135,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#include "tulip.h"
+
 /* These identify the driver base version and may not be removed. */
 static char version[] =
 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
@@ -242,8 +242,8 @@ static const struct pci_id_info pci_id_tbl[] __devinitdata = {
 };
 
 /* This driver was written to use PCI memory space, however some x86 systems
-   work only with I/O space accesses.  Pass -DUSE_IO_OPS to use PCI I/O space
-   accesses instead of memory space. */
+   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
+*/
 
 /* Offsets to the Command and Status Registers, "CSRs".
    While similar to the Tulip, these registers are longword aligned.
@@ -261,21 +261,11 @@ enum w840_offsets {
        CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
 };
 
-/* Bits in the interrupt status/enable registers. */
-/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
-enum intr_status_bits {
-       NormalIntr=0x10000, AbnormalIntr=0x8000,
-       IntrPCIErr=0x2000, TimerInt=0x800,
-       IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
-       TxFIFOUnderflow=0x20, RxErrIntr=0x10,
-       TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
-};
-
 /* Bits in the NetworkConfig register. */
 enum rx_mode_bits {
-       AcceptErr=0x80, AcceptRunt=0x40,
-       AcceptBroadcast=0x20, AcceptMulticast=0x10,
-       AcceptAllPhys=0x08, AcceptMyPhys=0x02,
+       AcceptErr=0x80,
+       RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
+       RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
 };
 
 enum mii_reg_bits {
@@ -297,13 +287,6 @@ struct w840_tx_desc {
        u32 buffer1, buffer2;
 };
 
-/* Bits in network_desc.status */
-enum desc_status_bits {
-       DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
-       DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
-       DescIntr=0x80000000,
-};
-
 #define MII_CNT                1 /* winbond only supports one MII */
 struct netdev_private {
        struct w840_rx_desc *rx_ring;
@@ -371,7 +354,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
        int irq;
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        void __iomem *ioaddr;
-       int bar = 1;
 
        i = pci_enable_device(pdev);
        if (i) return i;
@@ -393,10 +375,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
 
        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;
-#ifdef USE_IO_OPS
-       bar = 0;
-#endif
-       ioaddr = pci_iomap(pdev, bar, netdev_res_size);
+
+       ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
        if (!ioaddr)
                goto err_out_free_res;
 
@@ -838,7 +818,7 @@ static void init_rxtx_rings(struct net_device *dev)
                                        np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
                np->rx_ring[i].buffer1 = np->rx_addr[i];
-               np->rx_ring[i].status = DescOwn;
+               np->rx_ring[i].status = DescOwned;
        }
 
        np->cur_rx = 0;
@@ -923,7 +903,7 @@ static void init_registers(struct net_device *dev)
        }
 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
        i |= 0xE000;
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined (CONFIG_PARISC)
        i |= 0x4800;
 #else
 #warning Processor architecture undefined
@@ -1043,11 +1023,11 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 
        /* Now acquire the irq spinlock.
        * The difficult race is the ordering between
-        * increasing np->cur_tx and setting DescOwn:
+        * increasing np->cur_tx and setting DescOwned:
         * - if np->cur_tx is increased first the interrupt
         *   handler could consider the packet as transmitted
-        *   since DescOwn is cleared.
-        * - If DescOwn is set first the NIC could report the
+        *   since DescOwned is cleared.
+        * - If DescOwned is set first the NIC could report the
         *   packet as sent, but the interrupt handler would ignore it
         *   since the np->cur_tx was not yet increased.
         */
@@ -1055,7 +1035,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
        np->cur_tx++;
 
        wmb(); /* flush length, buffer1, buffer2 */
-       np->tx_ring[entry].status = DescOwn;
+       np->tx_ring[entry].status = DescOwned;
        wmb(); /* flush status and kick the hardware */
        iowrite32(0, np->base_addr + TxStartDemand);
        np->tx_q_bytes += skb->len;
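
The two wmb() calls in the hunk above order the descriptor body, the ownership bit, and the doorbell write as seen by the NIC. A small self-contained illustration of the same pairing, with a placeholder descriptor layout and names (not the w840 ones):

#include <linux/io.h>
#include <linux/types.h>

struct ex_desc {			/* placeholder descriptor layout */
	volatile u32 status;
	u32 length;
	u32 buffer1;
};

#define EX_DESC_OWNED	0x80000000

static void ex_post_tx(struct ex_desc *d, u32 len, u32 mapping,
		       void __iomem *doorbell)
{
	d->length  = len;
	d->buffer1 = mapping;

	wmb();				/* descriptor body visible first ...  */
	d->status = EX_DESC_OWNED;	/* ... then hand ownership to the NIC */

	wmb();				/* ownership visible before the kick  */
	iowrite32(0, doorbell);
}
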
@@ -1155,12 +1135,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
 
                handled = 1;
 
-               if (intr_status & (IntrRxDone | RxNoBuf))
+               if (intr_status & (RxIntr | RxNoBuf))
                        netdev_rx(dev);
                if (intr_status & RxNoBuf)
                        iowrite32(0, ioaddr + RxStartDemand);
 
-               if (intr_status & (TxIdle | IntrTxDone) &&
+               if (intr_status & (TxNoBuf | TxIntr) &&
                        np->cur_tx != np->dirty_tx) {
                        spin_lock(&np->lock);
                        netdev_tx_done(dev);
@@ -1168,8 +1148,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
                }
 
                /* Abnormal error summary/uncommon events handlers. */
-               if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
-                                                  TimerInt | IntrTxStopped))
+               if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
+                                                  TimerInt | TxDied))
                        netdev_error(dev, intr_status);
 
                if (--work_limit < 0) {
@@ -1305,7 +1285,7 @@ static int netdev_rx(struct net_device *dev)
                        np->rx_ring[entry].buffer1 = np->rx_addr[entry];
                }
                wmb();
-               np->rx_ring[entry].status = DescOwn;
+               np->rx_ring[entry].status = DescOwned;
        }
 
        return 0;
@@ -1342,7 +1322,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
                           dev->name, new);
                update_csr6(dev, new);
        }
-       if (intr_status & IntrRxDied) {         /* Missed a Rx frame. */
+       if (intr_status & RxDied) {             /* Missed a Rx frame. */
                np->stats.rx_errors++;
        }
        if (intr_status & TimerInt) {
@@ -1378,16 +1358,14 @@ static u32 __set_rx_mode(struct net_device *dev)
        u32 rx_mode;
 
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                memset(mc_filter, 0xff, sizeof(mc_filter));
-               rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+               rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
                        | AcceptMyPhys;
        } else if ((dev->mc_count > multicast_filter_limit)
                           ||  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
-               rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+               rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        } else {
                struct dev_mc_list *mclist;
                int i;
@@ -1398,7 +1376,7 @@ static u32 __set_rx_mode(struct net_device *dev)
                        filterbit &= 0x3f;
                        mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
                }
-               rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+               rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
        }
        iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
        iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
@@ -1646,14 +1624,18 @@ static int w840_resume (struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata (pdev);
        struct netdev_private *np = netdev_priv(dev);
+       int retval = 0;
 
        rtnl_lock();
        if (netif_device_present(dev))
                goto out; /* device not suspended */
        if (netif_running(dev)) {
-               pci_enable_device(pdev);
-       /*      pci_power_on(pdev); */
-
+               if ((retval = pci_enable_device(pdev))) {
+                       printk (KERN_ERR
+                               "%s: pci_enable_device failed in resume\n",
+                               dev->name);
+                       goto out;
+               }
                spin_lock_irq(&np->lock);
                iowrite32(1, np->base_addr+PCIBusCfg);
                ioread32(np->base_addr+PCIBusCfg);
@@ -1671,7 +1653,7 @@ static int w840_resume (struct pci_dev *pdev)
        }
 out:
        rtnl_unlock();
-       return 0;
+       return retval;
 }
 #endif
 
@@ -1689,7 +1671,7 @@ static struct pci_driver w840_driver = {
 static int __init w840_init(void)
 {
        printk(version);
-       return pci_module_init(&w840_driver);
+       return pci_register_driver(&w840_driver);
 }
 
 static void __exit w840_exit(void)
index 17ca7dc..d797b7b 100644 (file)
@@ -1707,7 +1707,7 @@ static int __init xircom_init(void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init(&xircom_driver);
+       return pci_register_driver(&xircom_driver);
 }
 
 
index 4103c37..1084180 100644 (file)
@@ -100,8 +100,8 @@ static const int multicast_filter_limit = 32;
 #define PKT_BUF_SZ             1536
 
 #define DRV_MODULE_NAME                "typhoon"
-#define DRV_MODULE_VERSION     "1.5.7"
-#define DRV_MODULE_RELDATE     "05/01/07"
+#define DRV_MODULE_VERSION     "1.5.8"
+#define DRV_MODULE_RELDATE     "06/11/09"
 #define PFX                    DRV_MODULE_NAME ": "
 #define ERR_PFX                        KERN_ERR PFX
 
@@ -937,8 +937,6 @@ typhoon_set_rx_mode(struct net_device *dev)
 
        filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(dev->flags & IFF_PROMISC) {
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                      dev->name);
                filter |= TYPHOON_RX_FILTER_PROMISCOUS;
        } else if((dev->mc_count > multicast_filter_limit) ||
                  (dev->flags & IFF_ALLMULTI)) {
@@ -2660,7 +2658,7 @@ static struct pci_driver typhoon_driver = {
 static int __init
 typhoon_init(void)
 {
-       return pci_module_init(&typhoon_driver);
+       return pci_register_driver(&typhoon_driver);
 }
 
 static void __exit
index 47f49ef..4e188f4 100644 (file)
@@ -47,7 +47,7 @@
 
 #undef DEBUG
 
-#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006"
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:Sept 11, 2006"
 #define DRV_NAME "ucc_geth"
 
 #define ugeth_printk(level, format, arg...)  \
@@ -2510,8 +2510,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
 
        if (dev->flags & IFF_PROMISC) {
 
-               /* Log any net taps. */
-               printk("%s: Promiscuous mode enabled.\n", dev->name);
                uf_regs->upsmr |= UPSMR_PRO;
 
        } else {
index ae97108..f7bc44f 100644 (file)
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME       "via-rhine"
-#define DRV_VERSION    "1.4.1"
-#define DRV_RELDATE    "July-24-2006"
+#define DRV_VERSION    "1.4.2"
+#define DRV_RELDATE    "Sept-11-2006"
 
 
 /* A few user-configurable values.
@@ -1679,9 +1679,6 @@ static void rhine_set_rx_mode(struct net_device *dev)
        u8 rx_mode;             /* Note: 0x02=accept runt, 0x01=accept errs */
 
        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-                      dev->name);
                rx_mode = 0x1C;
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
@@ -2005,7 +2002,7 @@ static int __init rhine_init(void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init(&rhine_driver);
+       return pci_register_driver(&rhine_driver);
 }
 
 
index aa9cd92..f23d207 100644 (file)
@@ -2109,8 +2109,6 @@ static void velocity_set_multi(struct net_device *dev)
        struct dev_mc_list *mclist;
 
        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                writel(0xffffffff, &regs->MARCAM[0]);
                writel(0xffffffff, &regs->MARCAM[4]);
                rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
@@ -2250,7 +2248,7 @@ static int __init velocity_init_module(void)
        int ret;
 
        velocity_register_notifier();
-       ret = pci_module_init(&velocity_driver);
+       ret = pci_register_driver(&velocity_driver);
        if (ret < 0)
                velocity_unregister_notifier();
        return ret;
index 496c3d5..82968e4 100644 (file)
@@ -29,7 +29,7 @@
 
 #define VELOCITY_NAME          "via-velocity"
 #define VELOCITY_FULL_DRV_NAM  "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
-#define VELOCITY_VERSION       "1.13"
+#define VELOCITY_VERSION       "1.14"
 
 #define VELOCITY_IO_SIZE       256
 
@@ -262,25 +262,6 @@ struct velocity_rd_info {
        dma_addr_t skb_dma;
 };
 
-/**
- *     alloc_rd_info           -       allocate an rd info block
- *
- *     Alocate and initialize a receive info structure used for keeping
- *     track of kernel side information related to each receive
- *     descriptor we are using
- */
-
-static inline struct velocity_rd_info *alloc_rd_info(void)
-{
-       struct velocity_rd_info *ptr;
-       if ((ptr = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC)) == NULL)
-               return NULL;
-       else {
-               memset(ptr, 0, sizeof(struct velocity_rd_info));
-               return ptr;
-       }
-}
-
 /*
  *     Used to track transmit side buffers.
  */
index 430b1f6..a5e7ce1 100644 (file)
@@ -40,7 +40,6 @@
 * 1998/08/08   acme            Initial version.
 */
 
-#include <linux/config.h>      /* OS configuration options */
 #include <linux/stddef.h>      /* offsetof(), etc. */
 #include <linux/errno.h>       /* return codes */
 #include <linux/string.h>      /* inline memset(), etc. */
index 6e1ec5b..7369875 100644 (file)
@@ -28,7 +28,6 @@
  *             2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
index 684af43..af4d415 100644 (file)
@@ -2062,7 +2062,7 @@ static struct pci_driver dscc4_driver = {
 
 static int __init dscc4_init_module(void)
 {
-       return pci_module_init(&dscc4_driver);
+       return pci_register_driver(&dscc4_driver);
 }
 
 static void __exit dscc4_cleanup_module(void)
index 3705db0..564351a 100644 (file)
@@ -2697,7 +2697,7 @@ fst_init(void)
        for (i = 0; i < FST_MAX_CARDS; i++)
                fst_card_array[i] = NULL;
        spin_lock_init(&fst_work_q_lock);
-       return pci_module_init(&fst_driver);
+       return pci_register_driver(&fst_driver);
 }
 
 static void __exit
index 39f4424..7b5d81d 100644 (file)
@@ -1790,7 +1790,7 @@ static struct pci_driver lmc_driver = {
 
 static int __init init_lmc(void)
 {
-    return pci_module_init(&lmc_driver);
+    return pci_register_driver(&lmc_driver);
 }
 
 static void __exit exit_lmc(void)
index 567efff..56e6940 100644 (file)
@@ -3677,7 +3677,7 @@ static struct pci_driver cpc_driver = {
 
 static int __init cpc_init(void)
 {
-       return pci_module_init(&cpc_driver);
+       return pci_register_driver(&cpc_driver);
 }
 
 static void __exit cpc_cleanup_module(void)
index 4df61fa..a6b9c33 100644 (file)
@@ -476,7 +476,7 @@ static int __init pci200_init_module(void)
                printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
                return -EINVAL;
        }
-       return pci_module_init(&pci200_pci_driver);
+       return pci_register_driver(&pci200_pci_driver);
 }
 
 
index 7628c2d..0ba018f 100644 (file)
@@ -32,7 +32,6 @@
  *             2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h> /* for CONFIG_DLCI_MAX */
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
index b2031df..ec68f7d 100644 (file)
@@ -837,7 +837,7 @@ static int __init wanxl_init_module(void)
 #ifdef MODULE
        printk(KERN_INFO "%s\n", version);
 #endif
-       return pci_module_init(&wanxl_pci_driver);
+       return pci_register_driver(&wanxl_pci_driver);
 }
 
 static void __exit wanxl_cleanup_module(void)
index 2e8ac99..bd4a68c 100644 (file)
@@ -271,25 +271,14 @@ config IPW2200_DEBUG
        bool "Enable full debugging output in IPW2200 module."
        depends on IPW2200
        ---help---
-         This option will enable debug tracing output for the IPW2200.  
+         This option will enable low-level debug tracing output for the IPW2200.
 
-         This will result in the kernel module being ~100k larger.  You can 
-         control which debug output is sent to the kernel log by setting the 
-         value in 
-
-         /sys/bus/pci/drivers/ipw2200/debug_level
-
-         This entry will only exist if this option is enabled.
+         Note that normal debug code is already compiled in. This low-level
+         debug option enables debugging on hot paths (e.g. Tx, Rx, ISR) and
+         will result in the kernel module being ~70k larger.  Most users
+         will typically not need this high-verbosity debug information.
 
-         To set a value, simply echo an 8-byte hex value to the same file:
-
-         % echo 0x00000FFO > /sys/bus/pci/drivers/ipw2200/debug_level
-
-         You can find the list of debug mask values in 
-         drivers/net/wireless/ipw2200.h
-
-         If you are not trying to debug or develop the IPW2200 driver, you 
-         most likely want to say N here.
+         If you are not sure, say N here.
 
 config AIRO
        tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
index a4dd139..e088cee 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/pci.h>
 #include <asm/uaccess.h>
 #include <net/ieee80211.h>
+#include <linux/kthread.h>
 
 #include "airo.h"
 
@@ -1187,11 +1188,10 @@ struct airo_info {
                        int whichbap);
        unsigned short *flash;
        tdsRssiEntry *rssi;
-       struct task_struct *task;
+       struct task_struct *list_bss_task;
+       struct task_struct *airo_thread_task;
        struct semaphore sem;
-       pid_t thr_pid;
        wait_queue_head_t thr_wait;
-       struct completion thr_exited;
        unsigned long expires;
        struct {
                struct sk_buff *skb;
@@ -1733,12 +1733,12 @@ static int readBSSListRid(struct airo_info *ai, int first,
                cmd.cmd=CMD_LISTBSS;
                if (down_interruptible(&ai->sem))
                        return -ERESTARTSYS;
+               ai->list_bss_task = current;
                issuecommand(ai, &cmd, &rsp);
                up(&ai->sem);
                /* Let the command take effect */
-               ai->task = current;
-               ssleep(3);
-               ai->task = NULL;
+               schedule_timeout_uninterruptible(3 * HZ);
+               ai->list_bss_task = NULL;
        }
        rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
                            list, ai->bssListRidLen, 1);
@@ -2400,8 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
                clear_bit(FLAG_REGISTERED, &ai->flags);
        }
        set_bit(JOB_DIE, &ai->jobs);
-       kill_proc(ai->thr_pid, SIGTERM, 1);
-       wait_for_completion(&ai->thr_exited);
+       kthread_stop(ai->airo_thread_task);
 
        /*
         * Clean out tx queue
@@ -2811,9 +2810,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
        ai->config.len = 0;
        ai->pci = pci;
        init_waitqueue_head (&ai->thr_wait);
-       init_completion (&ai->thr_exited);
-       ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES);
-       if (ai->thr_pid < 0)
+       ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
+       if (IS_ERR(ai->airo_thread_task))
                goto err_out_free;
        ai->tfm = NULL;
        rc = add_airo_dev( dev );
@@ -2930,8 +2928,7 @@ err_out_unlink:
        del_airo_dev(dev);
 err_out_thr:
        set_bit(JOB_DIE, &ai->jobs);
-       kill_proc(ai->thr_pid, SIGTERM, 1);
-       wait_for_completion(&ai->thr_exited);
+       kthread_stop(ai->airo_thread_task);
 err_out_free:
        free_netdev(dev);
        return NULL;
@@ -3063,13 +3060,7 @@ static int airo_thread(void *data) {
        struct airo_info *ai = dev->priv;
        int locked;
        
-       daemonize("%s", dev->name);
-       allow_signal(SIGTERM);
-
        while(1) {
-               if (signal_pending(current))
-                       flush_signals(current);
-
                /* make swsusp happy with our thread */
                try_to_freeze();
 
@@ -3097,7 +3088,7 @@ static int airo_thread(void *data) {
                                                set_bit(JOB_AUTOWEP, &ai->jobs);
                                                break;
                                        }
-                                       if (!signal_pending(current)) {
+                                       if (!kthread_should_stop()) {
                                                unsigned long wake_at;
                                                if (!ai->expires || !ai->scan_timeout) {
                                                        wake_at = max(ai->expires,
@@ -3109,7 +3100,7 @@ static int airo_thread(void *data) {
                                                schedule_timeout(wake_at - jiffies);
                                                continue;
                                        }
-                               } else if (!signal_pending(current)) {
+                               } else if (!kthread_should_stop()) {
                                        schedule();
                                        continue;
                                }
@@ -3154,7 +3145,8 @@ static int airo_thread(void *data) {
                else  /* Shouldn't get here, but we make sure to unlock */
                        up(&ai->sem);
        }
-       complete_and_exit (&ai->thr_exited, 0);
+
+       return 0;
 }
 
 static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) {
@@ -3235,8 +3227,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
                        if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) {
                                if (auto_wep)
                                        apriv->expires = 0;
-                               if (apriv->task)
-                                       wake_up_process (apriv->task);
+                               if (apriv->list_bss_task)
+                                       wake_up_process(apriv->list_bss_task);
                                set_bit(FLAG_UPDATE_UNI, &apriv->flags);
                                set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
 
@@ -3950,13 +3942,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
        pRsp->rsp0 = IN4500(ai, RESP0);
        pRsp->rsp1 = IN4500(ai, RESP1);
        pRsp->rsp2 = IN4500(ai, RESP2);
-       if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) {
-               airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd);
-               airo_print_err(ai->dev->name, "status= %x\n", pRsp->status);
-               airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0);
-               airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1);
-               airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2);
-       }
+       if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET)
+               airo_print_err(ai->dev->name,
+                       "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
+                       pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
+                       pRsp->rsp2);
 
        // clear stuck command busy if necessary
        if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
index d425c3c..3bfa791 100644 (file)
@@ -76,7 +76,7 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev)
 
 static int __init atmel_init_module(void)
 {
-       return pci_module_init(&atmel_driver);
+       return pci_register_driver(&atmel_driver);
 }
 
 static void __exit atmel_cleanup_module(void)
index 17a5682..6d4ea36 100644 (file)
 #define BCM43xx_PCICFG_ICR             0x94
 
 /* MMIO offsets */
-#define BCM43xx_MMIO_DMA1_REASON       0x20
-#define BCM43xx_MMIO_DMA1_IRQ_MASK     0x24
-#define BCM43xx_MMIO_DMA2_REASON       0x28
-#define BCM43xx_MMIO_DMA2_IRQ_MASK     0x2C
-#define BCM43xx_MMIO_DMA3_REASON       0x30
-#define BCM43xx_MMIO_DMA3_IRQ_MASK     0x34
-#define BCM43xx_MMIO_DMA4_REASON       0x38
-#define BCM43xx_MMIO_DMA4_IRQ_MASK     0x3C
+#define BCM43xx_MMIO_DMA0_REASON       0x20
+#define BCM43xx_MMIO_DMA0_IRQ_MASK     0x24
+#define BCM43xx_MMIO_DMA1_REASON       0x28
+#define BCM43xx_MMIO_DMA1_IRQ_MASK     0x2C
+#define BCM43xx_MMIO_DMA2_REASON       0x30
+#define BCM43xx_MMIO_DMA2_IRQ_MASK     0x34
+#define BCM43xx_MMIO_DMA3_REASON       0x38
+#define BCM43xx_MMIO_DMA3_IRQ_MASK     0x3C
+#define BCM43xx_MMIO_DMA4_REASON       0x40
+#define BCM43xx_MMIO_DMA4_IRQ_MASK     0x44
+#define BCM43xx_MMIO_DMA5_REASON       0x48
+#define BCM43xx_MMIO_DMA5_IRQ_MASK     0x4C
 #define BCM43xx_MMIO_STATUS_BITFIELD   0x120
 #define BCM43xx_MMIO_STATUS2_BITFIELD  0x124
 #define BCM43xx_MMIO_GEN_IRQ_REASON    0x128
 #define BCM43xx_MMIO_XMITSTAT_1                0x174
 #define BCM43xx_MMIO_REV3PLUS_TSF_LOW  0x180 /* core rev >= 3 only */
 #define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
-#define BCM43xx_MMIO_DMA1_BASE         0x200
-#define BCM43xx_MMIO_DMA2_BASE         0x220
-#define BCM43xx_MMIO_DMA3_BASE         0x240
-#define BCM43xx_MMIO_DMA4_BASE         0x260
+
+/* 32-bit DMA */
+#define BCM43xx_MMIO_DMA32_BASE0       0x200
+#define BCM43xx_MMIO_DMA32_BASE1       0x220
+#define BCM43xx_MMIO_DMA32_BASE2       0x240
+#define BCM43xx_MMIO_DMA32_BASE3       0x260
+#define BCM43xx_MMIO_DMA32_BASE4       0x280
+#define BCM43xx_MMIO_DMA32_BASE5       0x2A0
+/* 64-bit DMA */
+#define BCM43xx_MMIO_DMA64_BASE0       0x200
+#define BCM43xx_MMIO_DMA64_BASE1       0x240
+#define BCM43xx_MMIO_DMA64_BASE2       0x280
+#define BCM43xx_MMIO_DMA64_BASE3       0x2C0
+#define BCM43xx_MMIO_DMA64_BASE4       0x300
+#define BCM43xx_MMIO_DMA64_BASE5       0x340
+/* PIO */
 #define BCM43xx_MMIO_PIO1_BASE         0x300
 #define BCM43xx_MMIO_PIO2_BASE         0x310
 #define BCM43xx_MMIO_PIO3_BASE         0x320
 #define BCM43xx_MMIO_PIO4_BASE         0x330
+
 #define BCM43xx_MMIO_PHY_VER           0x3E0
 #define BCM43xx_MMIO_PHY_RADIO         0x3E2
 #define BCM43xx_MMIO_ANTENNA           0x3E8
 #define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK  0x20000
 
 /* sbtmstatehigh state flags */
-#define BCM43xx_SBTMSTATEHIGH_SERROR           0x1
-#define BCM43xx_SBTMSTATEHIGH_BUSY             0x4
+#define BCM43xx_SBTMSTATEHIGH_SERROR           0x00000001
+#define BCM43xx_SBTMSTATEHIGH_BUSY             0x00000004
+#define BCM43xx_SBTMSTATEHIGH_TIMEOUT          0x00000020
+#define BCM43xx_SBTMSTATEHIGH_COREFLAGS                0x1FFF0000
+#define BCM43xx_SBTMSTATEHIGH_DMA64BIT         0x10000000
+#define BCM43xx_SBTMSTATEHIGH_GATEDCLK         0x20000000
+#define BCM43xx_SBTMSTATEHIGH_BISTFAILED       0x40000000
+#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE     0x80000000
 
 /* sbimstate flags */
 #define BCM43xx_SBIMSTATE_IB_ERROR             0x20000
 #define BCM43xx_SBF_TIME_UPDATE                0x10000000
 #define BCM43xx_SBF_80000000           0x80000000 /*FIXME: fix name*/
 
+/* Microcode */
+#define BCM43xx_UCODE_REVISION         0x0000
+#define BCM43xx_UCODE_PATCHLEVEL       0x0002
+#define BCM43xx_UCODE_DATE             0x0004
+#define BCM43xx_UCODE_TIME             0x0006
+#define BCM43xx_UCODE_STATUS           0x0040
+
 /* MicrocodeFlagsBitfield (addr + lo-word values?)*/
 #define BCM43xx_UCODEFLAGS_OFFSET      0x005E
 
@@ -504,6 +534,12 @@ struct bcm43xx_phyinfo {
         * This lock is only used by bcm43xx_phy_{un}lock()
         */
        spinlock_t lock;
+
+       /* Firmware. */
+       const struct firmware *ucode;
+       const struct firmware *pcm;
+       const struct firmware *initvals0;
+       const struct firmware *initvals1;
 };
 
 
@@ -568,8 +604,11 @@ struct bcm43xx_dma {
        struct bcm43xx_dmaring *tx_ring1;
        struct bcm43xx_dmaring *tx_ring2;
        struct bcm43xx_dmaring *tx_ring3;
+       struct bcm43xx_dmaring *tx_ring4;
+       struct bcm43xx_dmaring *tx_ring5;
+
        struct bcm43xx_dmaring *rx_ring0;
-       struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */
+       struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
 };
 
 /* Data structures for PIO transmission, per 80211 core. */
@@ -593,12 +632,14 @@ struct bcm43xx_coreinfo {
        u8 available:1,
           enabled:1,
           initialized:1;
-       /** core_id ID number */
-       u16 id;
        /** core_rev revision number */
        u8 rev;
        /** Index number for _switch_core() */
        u8 index;
+       /** core_id ID number */
+       u16 id;
+       /** Core-specific data. */
+       void *priv;
 };
 
 /* Additional information for each 80211 core. */
@@ -647,7 +688,23 @@ enum {
        BCM43xx_STAT_RESTARTING,        /* controller_restart() called. */
 };
 #define bcm43xx_status(bcm)            atomic_read(&(bcm)->init_status)
-#define bcm43xx_set_status(bcm, stat)  atomic_set(&(bcm)->init_status, (stat))
+#define bcm43xx_set_status(bcm, stat)  do {                    \
+               atomic_set(&(bcm)->init_status, (stat));        \
+               smp_wmb();                                      \
+                                       } while (0)
+
+/*    *** THEORY OF LOCKING ***
+ *
+ * We have two different locks in the bcm43xx driver.
+ * => bcm->mutex:    General sleeping mutex. Protects struct bcm43xx_private
+ *                   and the device registers. This mutex does _not_ protect
+ *                   against concurrency from the IRQ handler.
+ * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
+ *
+ * Please note that, if you only take the irq_lock, you are not protected
+ * against concurrency from the periodic work handlers.
+ * Most times you want to take _both_ locks.
+ */
 
 struct bcm43xx_private {
        struct ieee80211_device *ieee;
@@ -659,7 +716,6 @@ struct bcm43xx_private {
 
        void __iomem *mmio_addr;
 
-       /* Locking, see "theory of locking" text below. */
        spinlock_t irq_lock;
        struct mutex mutex;
 
@@ -691,6 +747,7 @@ struct bcm43xx_private {
        struct bcm43xx_sprominfo sprom;
 #define BCM43xx_NR_LEDS                4
        struct bcm43xx_led leds[BCM43xx_NR_LEDS];
+       spinlock_t leds_lock;
 
        /* The currently active core. */
        struct bcm43xx_coreinfo *current_core;
@@ -708,10 +765,6 @@ struct bcm43xx_private {
        struct bcm43xx_coreinfo core_80211[ BCM43xx_MAX_80211_CORES ];
        /* Additional information, specific to the 80211 cores. */
        struct bcm43xx_coreinfo_80211 core_80211_ext[ BCM43xx_MAX_80211_CORES ];
-       /* Index of the current 80211 core. If current_core is not
-        * an 80211 core, this is -1.
-        */
-       int current_80211_core_idx;
        /* Number of available 80211 cores. */
        int nr_80211_available;
 
@@ -719,11 +772,13 @@ struct bcm43xx_private {
 
        /* Reason code of the last interrupt. */
        u32 irq_reason;
-       u32 dma_reason[4];
+       u32 dma_reason[6];
        /* saved irq enable/disable state bitfield. */
        u32 irq_savedstate;
        /* Link Quality calculation context. */
        struct bcm43xx_noise_calculation noisecalc;
+       /* if > 0 MAC is suspended. if == 0 MAC is enabled. */
+       int mac_suspended;
 
        /* Threshold values. */
        //TODO: The RTS thr has to be _used_. Currently, it is only set via WX.
@@ -746,12 +801,6 @@ struct bcm43xx_private {
        struct bcm43xx_key key[54];
        u8 default_key_idx;
 
-       /* Firmware. */
-       const struct firmware *ucode;
-       const struct firmware *pcm;
-       const struct firmware *initvals0;
-       const struct firmware *initvals1;
-
        /* Random Number Generator. */
        struct hwrng rng;
        char rng_name[20 + 1];
@@ -763,55 +812,6 @@ struct bcm43xx_private {
 };
 
 
-/*    *** THEORY OF LOCKING ***
- *
- * We have two different locks in the bcm43xx driver.
- * => bcm->mutex:    General sleeping mutex. Protects struct bcm43xx_private
- *                   and the device registers.
- * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
- *
- * We have three types of helper function pairs to utilize these locks.
- *     (Always use the helper functions.)
- * 1) bcm43xx_{un}lock_noirq():
- *     Takes bcm->mutex. Does _not_ protect against IRQ concurrency,
- *     so it is almost always unsafe, if device IRQs are enabled.
- *     So only use this, if device IRQs are masked.
- *     Locking may sleep.
- *     You can sleep within the critical section.
- * 2) bcm43xx_{un}lock_irqonly():
- *     Takes bcm->irq_lock. Does _not_ protect against
- *     bcm43xx_lock_noirq() critical sections.
- *     Does only protect against the IRQ handler path and other
- *     irqonly() critical sections.
- *     Locking does not sleep.
- *     You must not sleep within the critical section.
- * 3) bcm43xx_{un}lock_irqsafe():
- *     This is the cummulative lock and takes both, mutex and irq_lock.
- *     Protects against noirq() and irqonly() critical sections (and
- *     the IRQ handler path).
- *     Locking may sleep.
- *     You must not sleep within the critical section.
- */
-
-/* Lock type 1 */
-#define bcm43xx_lock_noirq(bcm)                mutex_lock(&(bcm)->mutex)
-#define bcm43xx_unlock_noirq(bcm)      mutex_unlock(&(bcm)->mutex)
-/* Lock type 2 */
-#define bcm43xx_lock_irqonly(bcm, flags)       \
-       spin_lock_irqsave(&(bcm)->irq_lock, flags)
-#define bcm43xx_unlock_irqonly(bcm, flags)     \
-       spin_unlock_irqrestore(&(bcm)->irq_lock, flags)
-/* Lock type 3 */
-#define bcm43xx_lock_irqsafe(bcm, flags) do {  \
-       bcm43xx_lock_noirq(bcm);                \
-       bcm43xx_lock_irqonly(bcm, flags);       \
-               } while (0)
-#define bcm43xx_unlock_irqsafe(bcm, flags) do {        \
-       bcm43xx_unlock_irqonly(bcm, flags);     \
-       bcm43xx_unlock_noirq(bcm);              \
-               } while (0)
-
-
 static inline
 struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
 {
@@ -863,34 +863,33 @@ int bcm43xx_using_pio(struct bcm43xx_private *bcm)
  * any of these functions.
  */
 static inline
+struct bcm43xx_coreinfo_80211 *
+bcm43xx_current_80211_priv(struct bcm43xx_private *bcm)
+{
+       assert(bcm->current_core->id == BCM43xx_COREID_80211);
+       return bcm->current_core->priv;
+}
+static inline
 struct bcm43xx_pio * bcm43xx_current_pio(struct bcm43xx_private *bcm)
 {
        assert(bcm43xx_using_pio(bcm));
-       assert(bcm->current_80211_core_idx >= 0);
-       assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-       return &(bcm->core_80211_ext[bcm->current_80211_core_idx].pio);
+       return &(bcm43xx_current_80211_priv(bcm)->pio);
 }
 static inline
 struct bcm43xx_dma * bcm43xx_current_dma(struct bcm43xx_private *bcm)
 {
        assert(!bcm43xx_using_pio(bcm));
-       assert(bcm->current_80211_core_idx >= 0);
-       assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-       return &(bcm->core_80211_ext[bcm->current_80211_core_idx].dma);
+       return &(bcm43xx_current_80211_priv(bcm)->dma);
 }
 static inline
 struct bcm43xx_phyinfo * bcm43xx_current_phy(struct bcm43xx_private *bcm)
 {
-       assert(bcm->current_80211_core_idx >= 0);
-       assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-       return &(bcm->core_80211_ext[bcm->current_80211_core_idx].phy);
+       return &(bcm43xx_current_80211_priv(bcm)->phy);
 }
 static inline
 struct bcm43xx_radioinfo * bcm43xx_current_radio(struct bcm43xx_private *bcm)
 {
-       assert(bcm->current_80211_core_idx >= 0);
-       assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-       return &(bcm->core_80211_ext[bcm->current_80211_core_idx].radio);
+       return &(bcm43xx_current_80211_priv(bcm)->radio);
 }
 
 
index ce2e40b..923275e 100644 (file)
@@ -77,7 +77,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
 
        down(&big_buffer_sem);
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
                fappend("Board not initialized.\n");
                goto out;
@@ -121,7 +122,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
        fappend("\n");
 
 out:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
        res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
        up(&big_buffer_sem);
        return res;
@@ -159,7 +161,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
        unsigned long flags;
 
        down(&big_buffer_sem);
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
                fappend("Board not initialized.\n");
                goto out;
@@ -169,7 +172,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
        fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags);
 
 out:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
        res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
        up(&big_buffer_sem);
        return res;
@@ -188,7 +192,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
        u64 tsf;
 
        down(&big_buffer_sem);
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
                fappend("Board not initialized.\n");
                goto out;
@@ -199,7 +204,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
                (unsigned int)(tsf & 0xFFFFFFFFULL));
 
 out:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
        res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
        up(&big_buffer_sem);
        return res;
@@ -221,7 +227,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
                res = -EFAULT;
                goto out_up;
        }
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
                printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
                res = -EFAULT;
@@ -237,7 +244,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
        res = buf_size;
        
 out_unlock:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 out_up:
        up(&big_buffer_sem);
        return res;
@@ -258,7 +266,8 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
        int i, cnt, j = 0;
 
        down(&big_buffer_sem);
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
 
        fappend("Last %d logged xmitstatus blobs (Latest first):\n\n",
                BCM43xx_NR_LOGGED_XMITSTATUS);
@@ -294,14 +303,51 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
                        i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
        }
 
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
        res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-       bcm43xx_lock_irqsafe(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (*ppos == pos) {
                /* Done. Drop the copied data. */
                e->xmitstatus_printing = 0;
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
+       up(&big_buffer_sem);
+       return res;
+}
+
+static ssize_t restart_write_file(struct file *file, const char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct bcm43xx_private *bcm = file->private_data;
+       char *buf = really_big_buffer;
+       ssize_t buf_size;
+       ssize_t res;
+       unsigned long flags;
+
+       buf_size = min(count, sizeof (really_big_buffer) - 1);
+       down(&big_buffer_sem);
+       if (copy_from_user(buf, user_buf, buf_size)) {
+               res = -EFAULT;
+               goto out_up;
+       }
+       mutex_lock(&(bcm)->mutex);
+       spin_lock_irqsave(&(bcm)->irq_lock, flags);
+       if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
+               printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
+               res = -EFAULT;
+               goto out_unlock;
+       }
+       if (count > 0 && buf[0] == '1') {
+               bcm43xx_controller_restart(bcm, "manually restarted");
+               res = count;
+       } else
+               res = -EINVAL;
+
+out_unlock:
+       spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
+       mutex_unlock(&(bcm)->mutex);
+out_up:
        up(&big_buffer_sem);
        return res;
 }
@@ -339,6 +385,11 @@ static struct file_operations txstat_fops = {
        .open = open_file_generic,
 };
 
+static struct file_operations restart_fops = {
+       .write = restart_write_file,
+       .open = open_file_generic,
+};
+
 
 void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm)
 {
@@ -390,6 +441,10 @@ void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm)
                                                bcm, &txstat_fops);
        if (!e->dentry_txstat)
                printk(KERN_ERR PFX "debugfs: creating \"tx_status\" for \"%s\" failed!\n", devdir);
+       e->dentry_restart = debugfs_create_file("restart", 0222, e->subdir,
+                                               bcm, &restart_fops);
+       if (!e->dentry_restart)
+               printk(KERN_ERR PFX "debugfs: creating \"restart\" for \"%s\" failed!\n", devdir);
 }
 
 void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm)
@@ -405,6 +460,7 @@ void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm)
        debugfs_remove(e->dentry_devinfo);
        debugfs_remove(e->dentry_tsf);
        debugfs_remove(e->dentry_txstat);
+       debugfs_remove(e->dentry_restart);
        debugfs_remove(e->subdir);
        kfree(e->xmitstatus_buffer);
        kfree(e->xmitstatus_print_buffer);
index 50ce267..a40d1af 100644 (file)
@@ -20,6 +20,7 @@ struct bcm43xx_dfsentry {
        struct dentry *dentry_spromdump;
        struct dentry *dentry_tsf;
        struct dentry *dentry_txstat;
+       struct dentry *dentry_restart;
 
        struct bcm43xx_private *bcm;
 
index d0318e5..76e3aed 100644 (file)
@@ -4,7 +4,7 @@
 
   DMA ringbuffer and descriptor allocation/management
 
-  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
+  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
 
   Some code in this file is derived from the b44.c driver
   Copyright (C) 2002 David S. Miller
@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
        }
 }
 
+u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
+{
+       static const u16 map64[] = {
+               BCM43xx_MMIO_DMA64_BASE0,
+               BCM43xx_MMIO_DMA64_BASE1,
+               BCM43xx_MMIO_DMA64_BASE2,
+               BCM43xx_MMIO_DMA64_BASE3,
+               BCM43xx_MMIO_DMA64_BASE4,
+               BCM43xx_MMIO_DMA64_BASE5,
+       };
+       static const u16 map32[] = {
+               BCM43xx_MMIO_DMA32_BASE0,
+               BCM43xx_MMIO_DMA32_BASE1,
+               BCM43xx_MMIO_DMA32_BASE2,
+               BCM43xx_MMIO_DMA32_BASE3,
+               BCM43xx_MMIO_DMA32_BASE4,
+               BCM43xx_MMIO_DMA32_BASE5,
+       };
+
+       if (dma64bit) {
+               assert(controller_idx >= 0 &&
+                      controller_idx < ARRAY_SIZE(map64));
+               return map64[controller_idx];
+       }
+       assert(controller_idx >= 0 &&
+              controller_idx < ARRAY_SIZE(map32));
+       return map32[controller_idx];
+}
+
 static inline
 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
 /* Unmap and free a descriptor buffer. */
 static inline
 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
-                           struct bcm43xx_dmadesc *desc,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
 {
@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
 {
        struct device *dev = &(ring->bcm->pci_dev->dev);
 
-       ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-                                        &(ring->dmabase), GFP_KERNEL);
-       if (!ring->vbase) {
+       ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
+                                           &(ring->dmabase), GFP_KERNEL);
+       if (!ring->descbase) {
                printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
                return -ENOMEM;
        }
-       if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
-               printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RINGMEMORY >1G "
-                                   "(0x%llx, len: %lu)\n",
-                               (unsigned long long)ring->dmabase,
-                               BCM43xx_DMA_RINGMEMSIZE);
-               dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-                                 ring->vbase, ring->dmabase);
-               return -ENOMEM;
-       }
-       assert(!(ring->dmabase & 0x000003FF));
-       memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
+       memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
 
        return 0;
 }
@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
        struct device *dev = &(ring->bcm->pci_dev->dev);
 
        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-                         ring->vbase, ring->dmabase);
+                         ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
-                                  u16 mmio_base)
+                                  u16 mmio_base, int dma64)
 {
        int i;
        u32 value;
+       u16 offset;
 
-       bcm43xx_write32(bcm,
-                       mmio_base + BCM43xx_DMA_RX_CONTROL,
-                       0x00000000);
+       offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
+       bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
-               value = bcm43xx_read32(bcm,
-                                      mmio_base + BCM43xx_DMA_RX_STATUS);
-               value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
-               if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
-                       i = -1;
-                       break;
+               offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
+               value = bcm43xx_read32(bcm, mmio_base + offset);
+               if (dma64) {
+                       value &= BCM43xx_DMA64_RXSTAT;
+                       if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
+                               i = -1;
+                               break;
+                       }
+               } else {
+                       value &= BCM43xx_DMA32_RXSTATE;
+                       if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
+                               i = -1;
+                               break;
+                       }
                }
                udelay(10);
        }
@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
-                                  u16 mmio_base)
+                                  u16 mmio_base, int dma64)
 {
        int i;
        u32 value;
+       u16 offset;
 
        for (i = 0; i < 1000; i++) {
-               value = bcm43xx_read32(bcm,
-                                      mmio_base + BCM43xx_DMA_TX_STATUS);
-               value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-               if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
-                   value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
-                   value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
-                       break;
+               offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+               value = bcm43xx_read32(bcm, mmio_base + offset);
+               if (dma64) {
+                       value &= BCM43xx_DMA64_TXSTAT;
+                       if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
+                           value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
+                           value == BCM43xx_DMA64_TXSTAT_STOPPED)
+                               break;
+               } else {
+                       value &= BCM43xx_DMA32_TXSTATE;
+                       if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
+                           value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
+                           value == BCM43xx_DMA32_TXSTAT_STOPPED)
+                               break;
+               }
                udelay(10);
        }
-       bcm43xx_write32(bcm,
-                       mmio_base + BCM43xx_DMA_TX_CONTROL,
-                       0x00000000);
+       offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
+       bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
-               value = bcm43xx_read32(bcm,
-                                      mmio_base + BCM43xx_DMA_TX_STATUS);
-               value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-               if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
-                       i = -1;
-                       break;
+               offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+               value = bcm43xx_read32(bcm, mmio_base + offset);
+               if (dma64) {
+                       value &= BCM43xx_DMA64_TXSTAT;
+                       if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
+                               i = -1;
+                               break;
+                       }
+               } else {
+                       value &= BCM43xx_DMA32_TXSTATE;
+                       if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
+                               i = -1;
+                               break;
+                       }
                }
                udelay(10);
        }
@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
        return 0;
 }
 
+static void fill_descriptor(struct bcm43xx_dmaring *ring,
+                           struct bcm43xx_dmadesc_generic *desc,
+                           dma_addr_t dmaaddr,
+                           u16 bufsize,
+                           int start, int end, int irq)
+{
+       int slot;
+
+       slot = bcm43xx_dma_desc2idx(ring, desc);
+       assert(slot >= 0 && slot < ring->nr_slots);
+
+       if (ring->dma64) {
+               u32 ctl0 = 0, ctl1 = 0;
+               u32 addrlo, addrhi;
+               u32 addrext;
+
+               addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
+               addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
+               addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+               addrhi |= ring->routing;
+               if (slot == ring->nr_slots - 1)
+                       ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
+               if (start)
+                       ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
+               if (end)
+                       ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
+               if (irq)
+                       ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
+               ctl1 |= (bufsize - ring->frameoffset)
+                       & BCM43xx_DMA64_DCTL1_BYTECNT;
+               ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
+                       & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
+
+               desc->dma64.control0 = cpu_to_le32(ctl0);
+               desc->dma64.control1 = cpu_to_le32(ctl1);
+               desc->dma64.address_low = cpu_to_le32(addrlo);
+               desc->dma64.address_high = cpu_to_le32(addrhi);
+       } else {
+               u32 ctl;
+               u32 addr;
+               u32 addrext;
+
+               addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
+               addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
+                          >> BCM43xx_DMA32_ROUTING_SHIFT;
+               addr |= ring->routing;
+               ctl = (bufsize - ring->frameoffset)
+                     & BCM43xx_DMA32_DCTL_BYTECNT;
+               if (slot == ring->nr_slots - 1)
+                       ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
+               if (start)
+                       ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
+               if (end)
+                       ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
+               if (irq)
+                       ctl |= BCM43xx_DMA32_DCTL_IRQ;
+               ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
+                      & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
+
+               desc->dma32.control = cpu_to_le32(ctl);
+               desc->dma32.address = cpu_to_le32(addr);
+       }
+}
+
 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
-                              struct bcm43xx_dmadesc *desc,
+                              struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
 {
        struct bcm43xx_rxhdr *rxhdr;
+       struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
-       u32 desc_addr;
-       u32 desc_ctl;
-       const int slot = (int)(desc - ring->vbase);
        struct sk_buff *skb;
 
-       assert(slot >= 0 && slot < ring->nr_slots);
        assert(!ring->tx);
 
        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-       if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
-               unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
-               dev_kfree_skb_any(skb);
-               printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA RX SKB >1G "
-                                   "(0x%llx, len: %u)\n",
-                       (unsigned long long)dmaaddr, ring->rx_buffersize);
-               return -ENOMEM;
-       }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;
-       desc_addr = (u32)(dmaaddr + ring->memoffset);
-       desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
-                   (u32)(ring->rx_buffersize - ring->frameoffset));
-       if (slot == ring->nr_slots - 1)
-               desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
-       set_desc_addr(desc, desc_addr);
-       set_desc_ctl(desc, desc_ctl);
+
+       fill_descriptor(ring, desc, dmaaddr,
+                       ring->rx_buffersize, 0, 0, 0);
 
        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
+       xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
+       xmitstat->cookie = 0;
 
        return 0;
 }
@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 {
        int i, err = -ENOMEM;
-       struct bcm43xx_dmadesc *desc;
+       struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
 
        for (i = 0; i < ring->nr_slots; i++) {
-               desc = ring->vbase + i;
-               meta = ring->meta + i;
+               desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
+       mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
 out:
@@ -354,8 +447,7 @@ out:
 
 err_unwind:
        for (i--; i >= 0; i--) {
-               desc = ring->vbase + i;
-               meta = ring->meta + i;
+               desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
 {
        int err = 0;
        u32 value;
+       u32 addrext;
 
        if (ring->tx) {
-               /* Set Transmit Control register to "transmit enable" */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-                                 BCM43xx_DMA_TXCTRL_ENABLE);
-               /* Set Transmit Descriptor ring address. */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
-                                 ring->dmabase + ring->memoffset);
+               if (ring->dma64) {
+                       u64 ringbase = (u64)(ring->dmabase);
+
+                       addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+                       value = BCM43xx_DMA64_TXENABLE;
+                       value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
+                               & BCM43xx_DMA64_TXADDREXT_MASK;
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
+                                       (ringbase & 0xFFFFFFFF));
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
+                                       ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+                                       | ring->routing);
+               } else {
+                       u32 ringbase = (u32)(ring->dmabase);
+
+                       addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+                       value = BCM43xx_DMA32_TXENABLE;
+                       value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
+                               & BCM43xx_DMA32_TXADDREXT_MASK;
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
+                                       (ringbase & ~BCM43xx_DMA32_ROUTING)
+                                       | ring->routing);
+               }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
-               /* Set Receive Control "receive enable" and frame offset */
-               value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
-               value |= BCM43xx_DMA_RXCTRL_ENABLE;
-               bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
-               /* Set Receive Descriptor ring address. */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
-                                 ring->dmabase + ring->memoffset);
-               /* Init the descriptor pointer. */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
+               if (ring->dma64) {
+                       u64 ringbase = (u64)(ring->dmabase);
+
+                       addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+                       value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
+                       value |= BCM43xx_DMA64_RXENABLE;
+                       value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
+                               & BCM43xx_DMA64_RXADDREXT_MASK;
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
+                                       (ringbase & 0xFFFFFFFF));
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
+                                       ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+                                       | ring->routing);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
+               } else {
+                       u32 ringbase = (u32)(ring->dmabase);
+
+                       addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+                       value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
+                       value |= BCM43xx_DMA32_RXENABLE;
+                       value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
+                               & BCM43xx_DMA32_RXADDREXT_MASK;
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
+                                       (ringbase & ~BCM43xx_DMA32_ROUTING)
+                                       | ring->routing);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
+               }
        }
 
 out:
@@ -402,27 +534,32 @@ out:
 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
 {
        if (ring->tx) {
-               bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
-               /* Zero out Transmit Descriptor ring address. */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
+               bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+               if (ring->dma64) {
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
+               } else
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
-               bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
-               /* Zero out Receive Descriptor ring address. */
-               bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
+               bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+               if (ring->dma64) {
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
+                       bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
+               } else
+                       bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
 }
 
 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
 {
-       struct bcm43xx_dmadesc *desc;
+       struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;
 
        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
-               desc = ring->vbase + i;
-               meta = ring->meta + i;
+               desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
                if (!meta->skb) {
                        assert(ring->tx);
@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
-                                        meta->skb->len, 1);
+                                       meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
-                                        ring->rx_buffersize, 0);
+                                       ring->rx_buffersize, 0);
                }
-               free_descriptor_buffer(ring, desc, meta, 0);
+               free_descriptor_buffer(ring, meta, 0);
        }
 }
 
 /* Main initialization function. */
 static
 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
-                                              u16 dma_controller_base,
-                                              int nr_descriptor_slots,
-                                              int tx)
+                                              int controller_index,
+                                              int for_tx,
+                                              int dma64)
 {
        struct bcm43xx_dmaring *ring;
        int err;
+       int nr_slots;
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;
 
-       ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
+       nr_slots = BCM43xx_RXRING_SLOTS;
+       if (for_tx)
+               nr_slots = BCM43xx_TXRING_SLOTS;
+
+       ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;
 
-       ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
+       ring->routing = BCM43xx_DMA32_CLIENTTRANS;
+       if (dma64)
+               ring->routing = BCM43xx_DMA64_CLIENTTRANS;
 #ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
-               ring->memoffset = 0;
+               ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
 #endif
 
        ring->bcm = bcm;
-       ring->nr_slots = nr_descriptor_slots;
+       ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
-       ring->mmio_base = dma_controller_base;
-       if (tx) {
+       ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
+       ring->index = controller_index;
+       ring->dma64 = !!dma64;
+       if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
-               switch (dma_controller_base) {
-               case BCM43xx_MMIO_DMA1_BASE:
-                       ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
-                       ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
-                       break;
-               case BCM43xx_MMIO_DMA4_BASE:
-                       ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
-                       ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
-                       break;
-               default:
+               if (ring->index == 0) {
+                       ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
+                       ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
+               } else if (ring->index == 3) {
+                       ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
+                       ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
+               } else
                        assert(0);
-               }
        }
 
        err = alloc_ringmemory(ring);
@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
        if (!ring)
                return;
 
-       dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
+       dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
+               (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
                return;
        dma = bcm43xx_current_dma(bcm);
 
-       bcm43xx_destroy_dmaring(dma->rx_ring1);
-       dma->rx_ring1 = NULL;
+       bcm43xx_destroy_dmaring(dma->rx_ring3);
+       dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
+
+       bcm43xx_destroy_dmaring(dma->tx_ring5);
+       dma->tx_ring5 = NULL;
+       bcm43xx_destroy_dmaring(dma->tx_ring4);
+       dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
+       int dma64 = 0;
+       u32 sbtmstatehi;
+
+       sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
+       if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
+               dma64 = 1;
 
        /* setup TX DMA channels. */
-       ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-                                    BCM43xx_TXRING_SLOTS, 1);
+       ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;
 
-       ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
-                                    BCM43xx_TXRING_SLOTS, 1);
+       ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;
 
-       ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
-                                    BCM43xx_TXRING_SLOTS, 1);
+       ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;
 
-       ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-                                    BCM43xx_TXRING_SLOTS, 1);
+       ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;
 
-       /* setup RX DMA channels. */
-       ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-                                    BCM43xx_RXRING_SLOTS, 0);
+       ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
+       dma->tx_ring4 = ring;
+
+       ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
+       if (!ring)
+               goto err_destroy_tx4;
+       dma->tx_ring5 = ring;
+
+       /* setup RX DMA channels. */
+       ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
+       if (!ring)
+               goto err_destroy_tx5;
        dma->rx_ring0 = ring;
 
        if (bcm->current_core->rev < 5) {
-               ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-                                            BCM43xx_RXRING_SLOTS, 0);
+               ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
-               dma->rx_ring1 = ring;
+               dma->rx_ring3 = ring;
        }
 
-       dprintk(KERN_INFO PFX "DMA initialized\n");
+       dprintk(KERN_INFO PFX "%s DMA initialized\n",
+                       dma64 ? "64-bit" : "32-bit");
        err = 0;
 out:
        return err;
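
The hunk above makes bcm43xx_dma_init() probe the 802.11 core for 64-bit DMA capability before building the six TX rings and the RX ring(s). A minimal sketch of that probe, illustrative only and not code from the patch, using only identifiers that appear in the hunk:

static int core_has_dma64(struct bcm43xx_private *bcm)
{
	u32 sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);

	return !!(sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT);
}
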
@@ -605,6 +764,12 @@ out:
 err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
+err_destroy_tx5:
+       bcm43xx_destroy_dmaring(dma->tx_ring5);
+       dma->tx_ring5 = NULL;
+err_destroy_tx4:
+       bcm43xx_destroy_dmaring(dma->tx_ring4);
+       dma->tx_ring4 = NULL;
 err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
@@ -624,7 +789,7 @@ err_destroy_tx0:
 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
 {
-       u16 cookie = 0xF000;
+       u16 cookie = 0x1000;
 
        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
-       switch (ring->mmio_base) {
-       default:
-               assert(0);
-       case BCM43xx_MMIO_DMA1_BASE:
+       switch (ring->index) {
+       case 0:
                cookie = 0xA000;
                break;
-       case BCM43xx_MMIO_DMA2_BASE:
+       case 1:
                cookie = 0xB000;
                break;
-       case BCM43xx_MMIO_DMA3_BASE:
+       case 2:
                cookie = 0xC000;
                break;
-       case BCM43xx_MMIO_DMA4_BASE:
+       case 3:
                cookie = 0xD000;
                break;
+       case 4:
+               cookie = 0xE000;
+               break;
+       case 5:
+               cookie = 0xF000;
+               break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;
@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
        case 0xD000:
                ring = dma->tx_ring3;
                break;
+       case 0xE000:
+               ring = dma->tx_ring4;
+               break;
+       case 0xF000:
+               ring = dma->tx_ring5;
+               break;
        default:
                assert(0);
        }
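
The cookie layout above packs the TX ring number into the upper four bits (0xA000 for ring 0 through 0xF000 for ring 5) and the descriptor slot into the lower twelve bits; parse_cookie() reverses that mapping. A standalone sketch of the idea, illustrative only and not code from the patch (encode_cookie/decode_cookie are hypothetical names):

#include <assert.h>
#include <stdint.h>

static uint16_t encode_cookie(int ring_index, int slot)
{
	assert(ring_index >= 0 && ring_index <= 5);
	assert((slot & 0xF000) == 0);            /* slot fits in the low 12 bits */
	return (uint16_t)((0xA000 + (ring_index << 12)) | slot);
}

static void decode_cookie(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;      /* 0xA..0xF maps back to rings 0..5 */
	*slot = cookie & 0x0FFF;
}

For example, ring 3 with slot 42 encodes to 0xD02A, which is handled by the 0xD000 case above.
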
@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
 {
+       u16 offset;
+       int descsize;
+
        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
         */
        wmb();
        slot = next_slot(ring, slot);
-       bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
-                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+       offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
+       descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
+               : sizeof(struct bcm43xx_dmadesc32);
+       bcm43xx_dma_write(ring, offset,
+                       (u32)(slot * descsize));
 }
 
-static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
-                          struct sk_buff *skb,
-                          u8 cur_frag)
+static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
+                           struct sk_buff *skb,
+                           u8 cur_frag)
 {
        int slot;
-       struct bcm43xx_dmadesc *desc;
+       struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
-       u32 desc_ctl;
-       u32 desc_addr;
+       dma_addr_t dmaaddr;
 
        assert(skb_shinfo(skb)->nr_frags == 0);
 
        slot = request_slot(ring);
-       desc = ring->vbase + slot;
-       meta = ring->meta + slot;
+       desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
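
The dmacontroller_poke_tx() change in the hunk above shows why the two engines cannot share one index calculation: the hardware TX index register takes a byte offset into the descriptor ring, and the descriptor size differs (8 bytes for bcm43xx_dmadesc32, 16 bytes for bcm43xx_dmadesc64). A tiny sketch, illustrative only and not code from the patch:

#include <stdint.h>

static uint32_t tx_index_value(int slot, int dma64)
{
	uint32_t descsize = dma64 ? 16 : 8;   /* sizeof dmadesc64 vs. dmadesc32 */
	return (uint32_t)slot * descsize;     /* e.g. slot 7 -> 0x70 (64-bit), 0x38 (32-bit) */
}
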
@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
                               generate_cookie(ring, slot));
 
        meta->skb = skb;
-       meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-       if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
-               return_slot(ring, slot);
-               printk(KERN_ERR PFX ">>>FATAL ERROR<<<  DMA TX SKB >1G "
-                                   "(0x%llx, len: %u)\n",
-                       (unsigned long long)meta->dmaaddr, skb->len);
-               return -ENOMEM;
-       }
+       dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+       meta->dmaaddr = dmaaddr;
 
-       desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
-       desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
-       desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
-       desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
-                    (u32)(meta->skb->len - ring->frameoffset));
-       if (slot == ring->nr_slots - 1)
-               desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
+       fill_descriptor(ring, desc, dmaaddr,
+                       skb->len, 1, 1, 1);
 
-       set_desc_ctl(desc, desc_ctl);
-       set_desc_addr(desc, desc_addr);
        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
-
-       return 0;
 }
 
 int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
-               //TODO: handle failure of dma_tx_fragment
        }
        ieee80211_txb_free(txb);
 
@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
 {
        struct bcm43xx_dmaring *ring;
-       struct bcm43xx_dmadesc *desc;
+       struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
+       u32 tmp;
 
        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
-       assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
-               desc = ring->vbase + slot;
-               meta = ring->meta + slot;
+               desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
-               is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
+               if (ring->dma64) {
+                       tmp = le32_to_cpu(desc->dma64.control0);
+                       is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
+               } else {
+                       tmp = le32_to_cpu(desc->dma32.control);
+                       is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
+               }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
-               free_descriptor_buffer(ring, desc, meta, 1);
+               free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
 {
-       struct bcm43xx_dmadesc *desc;
+       struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
        int err;
        dma_addr_t dmaaddr;
 
-       desc = ring->vbase + *slot;
-       meta = ring->meta + *slot;
+       desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
 
        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;
 
-       if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
+       if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
                s32 tmp = len;
 
                while (1) {
-                       desc = ring->vbase + *slot;
-                       meta = ring->meta + *slot;
+                       desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
-                                    "(len: %u, buffer: %u, nr-dropped: %d)\n",
-                       len, ring->rx_buffersize, cnt);
+                       "(len: %u, buffer: %u, nr-dropped: %d)\n",
+                       len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;
@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 #endif
 
        assert(!ring->tx);
-       status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
-       descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
-       current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
+       if (ring->dma64) {
+               status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
+               descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
+               current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
+       } else {
+               status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
+               descptr = (status & BCM43xx_DMA32_RXDPTR);
+               current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
+       }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);
 
        slot = ring->current_slot;
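
The RX path above applies the same scaling in the other direction: the status register reports the hardware's descriptor pointer as a byte offset, which is divided by the descriptor size to recover the slot index. An illustrative sketch (not code from the patch), using the mask values defined in the header further below:

#include <stdint.h>

static int rx_current_slot(uint32_t status, int dma64)
{
	uint32_t descptr = status & (dma64 ? 0x00001FFF   /* BCM43xx_DMA64_RXSTATDPTR */
	                                   : 0x00000FFF); /* BCM43xx_DMA32_RXDPTR     */
	return (int)(descptr / (dma64 ? 16 : 8));         /* descriptor size in bytes */
}

For instance, a 32-bit engine reporting a descriptor pointer of 0x18 (24 bytes) is sitting on slot 3.
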
@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
                        ring->max_used_slots = used_slots;
 #endif
        }
-       bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
-                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+       if (ring->dma64) {
+               bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
+                               (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
+       } else {
+               bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
+                               (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
+       }
        ring->current_slot = slot;
 }
 
@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
 {
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
-       bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-                         bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-                         | BCM43xx_DMA_TXCTRL_SUSPEND);
+       if (ring->dma64) {
+               bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+                               bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+                               | BCM43xx_DMA64_TXSUSPEND);
+       } else {
+               bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+                               bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+                               | BCM43xx_DMA32_TXSUSPEND);
+       }
 }
 
 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
 {
        assert(ring->tx);
-       bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-                         bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-                         & ~BCM43xx_DMA_TXCTRL_SUSPEND);
+       if (ring->dma64) {
+               bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+                               bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+                               & ~BCM43xx_DMA64_TXSUSPEND);
+       } else {
+               bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+                               bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+                               & ~BCM43xx_DMA32_TXSUSPEND);
+       }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
 }
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
index b7d7763..e04bcad 100644
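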
 #define BCM43xx_DMAIRQ_NONFATALMASK    (1 << 13)
 #define BCM43xx_DMAIRQ_RX_DONE         (1 << 16)
 
-/* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */
-#define BCM43xx_DMA_TX_CONTROL         0x00
-#define BCM43xx_DMA_TX_DESC_RING       0x04
-#define BCM43xx_DMA_TX_DESC_INDEX      0x08
-#define BCM43xx_DMA_TX_STATUS          0x0c
-#define BCM43xx_DMA_RX_CONTROL         0x10
-#define BCM43xx_DMA_RX_DESC_RING       0x14
-#define BCM43xx_DMA_RX_DESC_INDEX      0x18
-#define BCM43xx_DMA_RX_STATUS          0x1c
-
-/* DMA controller channel control word values. */
-#define BCM43xx_DMA_TXCTRL_ENABLE              (1 << 0)
-#define BCM43xx_DMA_TXCTRL_SUSPEND             (1 << 1)
-#define BCM43xx_DMA_TXCTRL_LOOPBACK            (1 << 2)
-#define BCM43xx_DMA_TXCTRL_FLUSH               (1 << 4)
-#define BCM43xx_DMA_RXCTRL_ENABLE              (1 << 0)
-#define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK       0x000000fe
-#define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT      1
-#define BCM43xx_DMA_RXCTRL_PIO                 (1 << 8)
-/* DMA controller channel status word values. */
-#define BCM43xx_DMA_TXSTAT_DPTR_MASK           0x00000fff
-#define BCM43xx_DMA_TXSTAT_STAT_MASK           0x0000f000
-#define BCM43xx_DMA_TXSTAT_STAT_DISABLED       0x00000000
-#define BCM43xx_DMA_TXSTAT_STAT_ACTIVE         0x00001000
-#define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT       0x00002000
-#define BCM43xx_DMA_TXSTAT_STAT_STOPPED                0x00003000
-#define BCM43xx_DMA_TXSTAT_STAT_SUSP           0x00004000
-#define BCM43xx_DMA_TXSTAT_ERROR_MASK          0x000f0000
-#define BCM43xx_DMA_TXSTAT_FLUSHED             (1 << 20)
-#define BCM43xx_DMA_RXSTAT_DPTR_MASK           0x00000fff
-#define BCM43xx_DMA_RXSTAT_STAT_MASK           0x0000f000
-#define BCM43xx_DMA_RXSTAT_STAT_DISABLED       0x00000000
-#define BCM43xx_DMA_RXSTAT_STAT_ACTIVE         0x00001000
-#define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT       0x00002000
-#define BCM43xx_DMA_RXSTAT_STAT_RESERVED       0x00003000
-#define BCM43xx_DMA_RXSTAT_STAT_ERRORS         0x00004000
-#define BCM43xx_DMA_RXSTAT_ERROR_MASK          0x000f0000
-
-/* DMA descriptor control field values. */
-#define BCM43xx_DMADTOR_BYTECNT_MASK           0x00001fff
-#define BCM43xx_DMADTOR_DTABLEEND              (1 << 28) /* End of descriptor table */
-#define BCM43xx_DMADTOR_COMPIRQ                        (1 << 29) /* IRQ on completion request */
-#define BCM43xx_DMADTOR_FRAMEEND               (1 << 30)
-#define BCM43xx_DMADTOR_FRAMESTART             (1 << 31)
+
+/*** 32-bit DMA Engine. ***/
+
+/* 32-bit DMA controller registers. */
+#define BCM43xx_DMA32_TXCTL                            0x00
+#define                BCM43xx_DMA32_TXENABLE                  0x00000001
+#define                BCM43xx_DMA32_TXSUSPEND                 0x00000002
+#define                BCM43xx_DMA32_TXLOOPBACK                0x00000004
+#define                BCM43xx_DMA32_TXFLUSH                   0x00000010
+#define                BCM43xx_DMA32_TXADDREXT_MASK            0x00030000
+#define                BCM43xx_DMA32_TXADDREXT_SHIFT           16
+#define BCM43xx_DMA32_TXRING                           0x04
+#define BCM43xx_DMA32_TXINDEX                          0x08
+#define BCM43xx_DMA32_TXSTATUS                         0x0C
+#define                BCM43xx_DMA32_TXDPTR                    0x00000FFF
+#define                BCM43xx_DMA32_TXSTATE                   0x0000F000
+#define                        BCM43xx_DMA32_TXSTAT_DISABLED   0x00000000
+#define                        BCM43xx_DMA32_TXSTAT_ACTIVE     0x00001000
+#define                        BCM43xx_DMA32_TXSTAT_IDLEWAIT   0x00002000
+#define                        BCM43xx_DMA32_TXSTAT_STOPPED    0x00003000
+#define                        BCM43xx_DMA32_TXSTAT_SUSP       0x00004000
+#define                BCM43xx_DMA32_TXERROR                   0x000F0000
+#define                        BCM43xx_DMA32_TXERR_NOERR       0x00000000
+#define                        BCM43xx_DMA32_TXERR_PROT        0x00010000
+#define                        BCM43xx_DMA32_TXERR_UNDERRUN    0x00020000
+#define                        BCM43xx_DMA32_TXERR_BUFREAD     0x00030000
+#define                        BCM43xx_DMA32_TXERR_DESCREAD    0x00040000
+#define                BCM43xx_DMA32_TXACTIVE                  0xFFF00000
+#define BCM43xx_DMA32_RXCTL                            0x10
+#define                BCM43xx_DMA32_RXENABLE                  0x00000001
+#define                BCM43xx_DMA32_RXFROFF_MASK              0x000000FE
+#define                BCM43xx_DMA32_RXFROFF_SHIFT             1
+#define                BCM43xx_DMA32_RXDIRECTFIFO              0x00000100
+#define                BCM43xx_DMA32_RXADDREXT_MASK            0x00030000
+#define                BCM43xx_DMA32_RXADDREXT_SHIFT           16
+#define BCM43xx_DMA32_RXRING                           0x14
+#define BCM43xx_DMA32_RXINDEX                          0x18
+#define BCM43xx_DMA32_RXSTATUS                         0x1C
+#define                BCM43xx_DMA32_RXDPTR                    0x00000FFF
+#define                BCM43xx_DMA32_RXSTATE                   0x0000F000
+#define                        BCM43xx_DMA32_RXSTAT_DISABLED   0x00000000
+#define                        BCM43xx_DMA32_RXSTAT_ACTIVE     0x00001000
+#define                        BCM43xx_DMA32_RXSTAT_IDLEWAIT   0x00002000
+#define                        BCM43xx_DMA32_RXSTAT_STOPPED    0x00003000
+#define                BCM43xx_DMA32_RXERROR                   0x000F0000
+#define                        BCM43xx_DMA32_RXERR_NOERR       0x00000000
+#define                        BCM43xx_DMA32_RXERR_PROT        0x00010000
+#define                        BCM43xx_DMA32_RXERR_OVERFLOW    0x00020000
+#define                        BCM43xx_DMA32_RXERR_BUFWRITE    0x00030000
+#define                        BCM43xx_DMA32_RXERR_DESCREAD    0x00040000
+#define                BCM43xx_DMA32_RXACTIVE                  0xFFF00000
+
+/* 32-bit DMA descriptor. */
+struct bcm43xx_dmadesc32 {
+       __le32 control;
+       __le32 address;
+} __attribute__((__packed__));
+#define BCM43xx_DMA32_DCTL_BYTECNT             0x00001FFF
+#define BCM43xx_DMA32_DCTL_ADDREXT_MASK                0x00030000
+#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT       16
+#define BCM43xx_DMA32_DCTL_DTABLEEND           0x10000000
+#define BCM43xx_DMA32_DCTL_IRQ                 0x20000000
+#define BCM43xx_DMA32_DCTL_FRAMEEND            0x40000000
+#define BCM43xx_DMA32_DCTL_FRAMESTART          0x80000000
+
+/* Address field Routing value. */
+#define BCM43xx_DMA32_ROUTING                  0xC0000000
+#define BCM43xx_DMA32_ROUTING_SHIFT            30
+#define                BCM43xx_DMA32_NOTRANS           0x00000000
+#define                BCM43xx_DMA32_CLIENTTRANS       0x40000000
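
A sketch of how a single-fragment TX descriptor could be built from the control-word flags above. The handling of the routing bits in the address word is an assumption based on the ROUTING mask and shift; the patch's actual fill_descriptor() helper is not shown in this excerpt, so this is illustrative only:

#include <stdint.h>

struct dmadesc32_sketch {            /* mirrors struct bcm43xx_dmadesc32 above */
	uint32_t control;            /* little-endian on the wire (cpu_to_le32 in real code) */
	uint32_t address;
};

static void fill_tx_desc32(struct dmadesc32_sketch *d, uint32_t dmaaddr,
                           uint32_t routing, uint16_t bytecnt, int is_last_slot)
{
	uint32_t ctl = 0x80000000u                /* FRAMESTART */
	             | 0x40000000u                /* FRAMEEND   */
	             | 0x20000000u                /* IRQ        */
	             | (bytecnt & 0x00001FFFu);   /* BYTECNT    */

	if (is_last_slot)
		ctl |= 0x10000000u;               /* DTABLEEND: last entry in the table */
	d->control = ctl;
	d->address = (dmaaddr & ~0xC0000000u) | routing;  /* ROUTING in the top bits (assumed) */
}
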
+
+
+
+/*** 64-bit DMA Engine. ***/
+
+/* 64-bit DMA controller registers. */
+#define BCM43xx_DMA64_TXCTL                            0x00
+#define                BCM43xx_DMA64_TXENABLE                  0x00000001
+#define                BCM43xx_DMA64_TXSUSPEND                 0x00000002
+#define                BCM43xx_DMA64_TXLOOPBACK                0x00000004
+#define                BCM43xx_DMA64_TXFLUSH                   0x00000010
+#define                BCM43xx_DMA64_TXADDREXT_MASK            0x00030000
+#define                BCM43xx_DMA64_TXADDREXT_SHIFT           16
+#define BCM43xx_DMA64_TXINDEX                          0x04
+#define BCM43xx_DMA64_TXRINGLO                         0x08
+#define BCM43xx_DMA64_TXRINGHI                         0x0C
+#define BCM43xx_DMA64_TXSTATUS                         0x10
+#define                BCM43xx_DMA64_TXSTATDPTR                0x00001FFF
+#define                BCM43xx_DMA64_TXSTAT                    0xF0000000
+#define                        BCM43xx_DMA64_TXSTAT_DISABLED   0x00000000
+#define                        BCM43xx_DMA64_TXSTAT_ACTIVE     0x10000000
+#define                        BCM43xx_DMA64_TXSTAT_IDLEWAIT   0x20000000
+#define                        BCM43xx_DMA64_TXSTAT_STOPPED    0x30000000
+#define                        BCM43xx_DMA64_TXSTAT_SUSP       0x40000000
+#define BCM43xx_DMA64_TXERROR                          0x14
+#define                BCM43xx_DMA64_TXERRDPTR                 0x0001FFFF
+#define                BCM43xx_DMA64_TXERR                     0xF0000000
+#define                        BCM43xx_DMA64_TXERR_NOERR       0x00000000
+#define                        BCM43xx_DMA64_TXERR_PROT        0x10000000
+#define                        BCM43xx_DMA64_TXERR_UNDERRUN    0x20000000
+#define                        BCM43xx_DMA64_TXERR_TRANSFER    0x30000000
+#define                        BCM43xx_DMA64_TXERR_DESCREAD    0x40000000
+#define                        BCM43xx_DMA64_TXERR_CORE        0x50000000
+#define BCM43xx_DMA64_RXCTL                            0x20
+#define                BCM43xx_DMA64_RXENABLE                  0x00000001
+#define                BCM43xx_DMA64_RXFROFF_MASK              0x000000FE
+#define                BCM43xx_DMA64_RXFROFF_SHIFT             1
+#define                BCM43xx_DMA64_RXDIRECTFIFO              0x00000100
+#define                BCM43xx_DMA64_RXADDREXT_MASK            0x00030000
+#define                BCM43xx_DMA64_RXADDREXT_SHIFT           16
+#define BCM43xx_DMA64_RXINDEX                          0x24
+#define BCM43xx_DMA64_RXRINGLO                         0x28
+#define BCM43xx_DMA64_RXRINGHI                         0x2C
+#define BCM43xx_DMA64_RXSTATUS                         0x30
+#define                BCM43xx_DMA64_RXSTATDPTR                0x00001FFF
+#define                BCM43xx_DMA64_RXSTAT                    0xF0000000
+#define                        BCM43xx_DMA64_RXSTAT_DISABLED   0x00000000
+#define                        BCM43xx_DMA64_RXSTAT_ACTIVE     0x10000000
+#define                        BCM43xx_DMA64_RXSTAT_IDLEWAIT   0x20000000
+#define                        BCM43xx_DMA64_RXSTAT_STOPPED    0x30000000
+#define                        BCM43xx_DMA64_RXSTAT_SUSP       0x40000000
+#define BCM43xx_DMA64_RXERROR                          0x34
+#define                BCM43xx_DMA64_RXERRDPTR                 0x0001FFFF
+#define                BCM43xx_DMA64_RXERR                     0xF0000000
+#define                        BCM43xx_DMA64_RXERR_NOERR       0x00000000
+#define                        BCM43xx_DMA64_RXERR_PROT        0x10000000
+#define                        BCM43xx_DMA64_RXERR_UNDERRUN    0x20000000
+#define                        BCM43xx_DMA64_RXERR_TRANSFER    0x30000000
+#define                        BCM43xx_DMA64_RXERR_DESCREAD    0x40000000
+#define                        BCM43xx_DMA64_RXERR_CORE        0x50000000
+
+/* 64-bit DMA descriptor. */
+struct bcm43xx_dmadesc64 {
+       __le32 control0;
+       __le32 control1;
+       __le32 address_low;
+       __le32 address_high;
+} __attribute__((__packed__));
+#define BCM43xx_DMA64_DCTL0_DTABLEEND          0x10000000
+#define BCM43xx_DMA64_DCTL0_IRQ                        0x20000000
+#define BCM43xx_DMA64_DCTL0_FRAMEEND           0x40000000
+#define BCM43xx_DMA64_DCTL0_FRAMESTART         0x80000000
+#define BCM43xx_DMA64_DCTL1_BYTECNT            0x00001FFF
+#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK       0x00030000
+#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT      16
+
+/* Address field Routing value. */
+#define BCM43xx_DMA64_ROUTING                  0xC0000000
+#define BCM43xx_DMA64_ROUTING_SHIFT            30
+#define                BCM43xx_DMA64_NOTRANS           0x00000000
+#define                BCM43xx_DMA64_CLIENTTRANS       0x80000000
+
+
+
+struct bcm43xx_dmadesc_generic {
+       union {
+               struct bcm43xx_dmadesc32 dma32;
+               struct bcm43xx_dmadesc64 dma64;
+       } __attribute__((__packed__));
+} __attribute__((__packed__));
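
The generic wrapper lets code shared by both engines carry a single descriptor pointer and pick the correct view only when a field is touched, as bcm43xx_dma_handle_xmitstatus() does earlier in this patch. A minimal sketch (illustrative only; desc_is_frame_end is a hypothetical helper):

static inline int desc_is_frame_end(const struct bcm43xx_dmadesc_generic *desc,
				    int dma64)
{
	if (dma64)
		return !!(le32_to_cpu(desc->dma64.control0) & BCM43xx_DMA64_DCTL0_FRAMEEND);
	return !!(le32_to_cpu(desc->dma32.control) & BCM43xx_DMA32_DCTL_FRAMEEND);
}
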
+
 
 /* Misc DMA constants */
 #define BCM43xx_DMA_RINGMEMSIZE                PAGE_SIZE
-#define BCM43xx_DMA_BUSADDRMAX         0x3FFFFFFF
-#define BCM43xx_DMA_DMABUSADDROFFSET   (1 << 30)
-#define BCM43xx_DMA1_RX_FRAMEOFFSET    30
-#define BCM43xx_DMA4_RX_FRAMEOFFSET    0
+#define BCM43xx_DMA0_RX_FRAMEOFFSET    30
+#define BCM43xx_DMA3_RX_FRAMEOFFSET    0
+
 
 /* DMA engine tuning knobs */
 #define BCM43xx_TXRING_SLOTS           512
 #define BCM43xx_RXRING_SLOTS           64
-#define BCM43xx_DMA1_RXBUFFERSIZE      (2304 + 100)
-#define BCM43xx_DMA4_RXBUFFERSIZE      16
+#define BCM43xx_DMA0_RX_BUFFERSIZE     (2304 + 100)
+#define BCM43xx_DMA3_RX_BUFFERSIZE     16
 /* Suspend the tx queue, if less than this percent slots are free. */
 #define BCM43xx_TXSUSPEND_PERCENT      20
 /* Resume the tx queue, if more than this percent slots are free. */
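
A worked example of the suspend knob above, illustrative only since the actual check lives elsewhere in the driver: with BCM43xx_TXRING_SLOTS == 512 and BCM43xx_TXSUSPEND_PERCENT == 20, the queue would be suspended once 102 or fewer slots remain free, and resumed again when the free-slot percentage climbs back over the resume threshold. A hypothetical check equivalent to the comment's description:

static int tx_queue_should_suspend(int free_slots, int total_slots)
{
	return free_slots * 100 / total_slots < BCM43xx_TXSUSPEND_PERCENT;
}
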
@@ -86,17 +202,6 @@ struct bcm43xx_private;
 struct bcm43xx_xmitstatus;
 
 
-struct bcm43xx_dmadesc {
-       __le32 _control;
-       __le32 _address;
-} __attribute__((__packed__));
-
-/* Macros to access the bcm43xx_dmadesc struct */
-#define get_desc_ctl(desc)             le32_to_cpu((desc)->_control)
-#define set_desc_ctl(desc, ctl)                do { (desc)->_control = cpu_to_le32(ctl); } while (0)
-#define get_desc_addr(desc)            le32_to_cpu((desc)->_address)
-#define set_desc_addr(desc, addr)      do { (desc)->_address = cpu_to_le32(addr); } while (0)
-
 struct bcm43xx_dmadesc_meta {
        /* The kernel DMA-able buffer. */
        struct sk_buff *skb;
@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta {
 };
 
 struct bcm43xx_dmaring {
-       struct bcm43xx_private *bcm;
        /* Kernel virtual base address of the ring memory. */
-       struct bcm43xx_dmadesc *vbase;
-       /* DMA memory offset */
-       dma_addr_t memoffset;
-       /* (Unadjusted) DMA base bus-address of the ring memory. */
-       dma_addr_t dmabase;
+       void *descbase;
        /* Meta data about all descriptors. */
        struct bcm43xx_dmadesc_meta *meta;
+       /* DMA Routing value. */
+       u32 routing;
+       /* (Unadjusted) DMA base bus-address of the ring memory. */
+       dma_addr_t dmabase;
        /* Number of descriptor slots in the ring. */
        int nr_slots;
        /* Number of used descriptor slots. */
@@ -127,12 +231,17 @@ struct bcm43xx_dmaring {
        u32 frameoffset;
        /* Descriptor buffer size. */
        u16 rx_buffersize;
-       /* The MMIO base register of the DMA controller, this
-        * ring is posted to.
-        */
+       /* The MMIO base register of the DMA controller. */
        u16 mmio_base;
-       u8 tx:1,        /* TRUE, if this is a TX ring. */
-          suspended:1; /* TRUE, if transfers are suspended on this ring. */
+       /* DMA controller index number (0-5). */
+       int index;
+       /* Boolean. Is this a TX ring? */
+       u8 tx;
+       /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
+       u8 dma64;
+       /* Boolean. Are transfers suspended on this ring? */
+       u8 suspended;
+       struct bcm43xx_private *bcm;
 #ifdef CONFIG_BCM43XX_DEBUG
        /* Maximum number of used slots. */
        int max_used_slots;
@@ -140,6 +249,34 @@ struct bcm43xx_dmaring {
 };
 
 
+static inline
+int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
+                        struct bcm43xx_dmadesc_generic *desc)
+{
+       if (ring->dma64) {
+               struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
+               return (int)(&(desc->dma64) - dd64);
+       } else {
+               struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
+               return (int)(&(desc->dma32) - dd32);
+       }
+}
+
+static inline
+struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
+                                                     int slot,
+                                                     struct bcm43xx_dmadesc_meta **meta)
+{
+       *meta = &(ring->meta[slot]);
+       if (ring->dma64) {
+               struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
+               return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
+       } else {
+               struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
+               return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
+       }
+}
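
The two helpers above are inverses of each other for any valid slot, on either engine; they replace the old direct ring->vbase + slot and ring->meta + slot arithmetic seen in the bcm43xx_dma.c hunks. An illustrative consistency check, not code from the patch:

static inline void dma_slot_roundtrip_check(struct bcm43xx_dmaring *ring, int slot)
{
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_dmadesc_generic *desc;

	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
	assert(bcm43xx_dma_desc2idx(ring, desc) == slot);
	assert(meta == &ring->meta[slot]);
}
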
+
 static inline
 u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
                     u16 offset)
@@ -159,9 +296,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm);
 void bcm43xx_dma_free(struct bcm43xx_private *bcm);
 
 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
-                                  u16 dmacontroller_mmio_base);
+                                  u16 dmacontroller_mmio_base,
+                                  int dma64);
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
-                                  u16 dmacontroller_mmio_base);
+                                  u16 dmacontroller_mmio_base,
+                                  int dma64);
+
+u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
 
 void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
@@ -173,7 +314,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb);
 void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
 
-
 #else /* CONFIG_BCM43XX_DMA */
 
 
@@ -188,13 +328,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
 }
 static inline
 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
-                                  u16 dmacontroller_mmio_base)
+                                  u16 dmacontroller_mmio_base,
+                                  int dma64)
 {
        return 0;
 }
 static inline
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
-                                  u16 dmacontroller_mmio_base)
+                                  u16 dmacontroller_mmio_base,
+                                  int dma64)
 {
        return 0;
 }
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index ec80692..c3f90c8 100644
@@ -51,12 +51,12 @@ static void bcm43xx_led_blink(unsigned long d)
        struct bcm43xx_private *bcm = led->bcm;
        unsigned long flags;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->leds_lock, flags);
        if (led->blink_interval) {
                bcm43xx_led_changestate(led);
                mod_timer(&led->blink_timer, jiffies + led->blink_interval);
        }
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&bcm->leds_lock, flags);
 }
 
 static void bcm43xx_led_blink_start(struct bcm43xx_led *led,
@@ -177,7 +177,9 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
        int i, turn_on;
        unsigned long interval = 0;
        u16 ledctl;
+       unsigned long flags;
 
+       spin_lock_irqsave(&bcm->leds_lock, flags);
        ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
        for (i = 0; i < BCM43xx_NR_LEDS; i++) {
                led = &(bcm->leds[i]);
@@ -266,6 +268,7 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
                        ledctl &= ~(1 << i);
        }
        bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
+       spin_unlock_irqrestore(&bcm->leds_lock, flags);
 }
 
 void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
@@ -274,7 +277,9 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
        u16 ledctl;
        int i;
        int bit_on;
+       unsigned long flags;
 
+       spin_lock_irqsave(&bcm->leds_lock, flags);
        ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
        for (i = 0; i < BCM43xx_NR_LEDS; i++) {
                led = &(bcm->leds[i]);
@@ -290,4 +295,5 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
                        ledctl &= ~(1 << i);
        }
        bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
+       spin_unlock_irqrestore(&bcm->leds_lock, flags);
 }
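
The LED hunks above drop the old irqonly lock helpers in favour of a dedicated leds_lock spinlock held across every read-modify-write of the GPIO control register. A minimal sketch of that pattern, illustrative only (set_led_bit is a hypothetical helper, not part of the patch):

static void set_led_bit(struct bcm43xx_private *bcm, int led_index, int on)
{
	unsigned long flags;
	u16 ledctl;

	spin_lock_irqsave(&bcm->leds_lock, flags);
	ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
	if (on)
		ledctl |= (1 << led_index);
	else
		ledctl &= ~(1 << led_index);
	bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
	spin_unlock_irqrestore(&bcm->leds_lock, flags);
}
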
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index df317c1..cb9a3ae 100644
@@ -509,23 +509,20 @@ static void bcm43xx_synchronize_irq(struct bcm43xx_private *bcm)
 }
 
 /* Make sure we don't receive more data from the device. */
-static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *oldstate)
+static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm)
 {
        unsigned long flags;
-       u32 old;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
-               bcm43xx_unlock_irqonly(bcm, flags);
+               spin_unlock_irqrestore(&bcm->irq_lock, flags);
                return -EBUSY;
        }
-       old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-       bcm43xx_unlock_irqonly(bcm, flags);
+       bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
+       bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK); /* flush */
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
        bcm43xx_synchronize_irq(bcm);
 
-       if (oldstate)
-               *oldstate = old;
-
        return 0;
 }
 
@@ -537,7 +534,6 @@ static int bcm43xx_read_radioinfo(struct bcm43xx_private *bcm)
        u16 manufact;
        u16 version;
        u8 revision;
-       s8 i;
 
        if (bcm->chip_id == 0x4317) {
                if (bcm->chip_rev == 0x00)
@@ -580,20 +576,11 @@ static int bcm43xx_read_radioinfo(struct bcm43xx_private *bcm)
        radio->version = version;
        radio->revision = revision;
 
-       /* Set default attenuation values. */
-       radio->baseband_atten = bcm43xx_default_baseband_attenuation(bcm);
-       radio->radio_atten = bcm43xx_default_radio_attenuation(bcm);
-       radio->txctl1 = bcm43xx_default_txctl1(bcm);
-       radio->txctl2 = 0xFFFF;
        if (phy->type == BCM43xx_PHYTYPE_A)
                radio->txpower_desired = bcm->sprom.maxpower_aphy;
        else
                radio->txpower_desired = bcm->sprom.maxpower_bgphy;
 
-       /* Initialize the in-memory nrssi Lookup Table. */
-       for (i = 0; i < 64; i++)
-               radio->nrssi_lt[i] = i;
-
        return 0;
 
 err_unsupported_radio:
@@ -1250,10 +1237,6 @@ int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *ne
                goto out;
 
        bcm->current_core = new_core;
-       bcm->current_80211_core_idx = -1;
-       if (new_core->id == BCM43xx_COREID_80211)
-               bcm->current_80211_core_idx = (int)(new_core - &(bcm->core_80211[0]));
-
 out:
        return err;
 }
@@ -1389,6 +1372,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
        if ((bcm43xx_core_enabled(bcm)) &&
            !bcm43xx_using_pio(bcm)) {
 //FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
+#if 0
 #ifndef CONFIG_BCM947XX
                /* reset all used DMA controllers. */
                bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
@@ -1398,6 +1382,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
                bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
                if (bcm->current_core->rev < 5)
                        bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
+#endif
 #endif
        }
        if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
@@ -1423,43 +1408,23 @@ static void bcm43xx_wireless_core_disable(struct bcm43xx_private *bcm)
        bcm43xx_core_disable(bcm, 0);
 }
 
-/* Mark the current 80211 core inactive.
- * "active_80211_core" is the other 80211 core, which is used.
- */
-static int bcm43xx_wireless_core_mark_inactive(struct bcm43xx_private *bcm,
-                                              struct bcm43xx_coreinfo *active_80211_core)
+/* Mark the current 80211 core inactive. */
+static void bcm43xx_wireless_core_mark_inactive(struct bcm43xx_private *bcm)
 {
        u32 sbtmstatelow;
-       struct bcm43xx_coreinfo *old_core;
-       int err = 0;
 
        bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
        bcm43xx_radio_turn_off(bcm);
        sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
-       sbtmstatelow &= ~0x200a0000;
-       sbtmstatelow |= 0xa0000;
+       sbtmstatelow &= 0xDFF5FFFF;
+       sbtmstatelow |= 0x000A0000;
        bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
        udelay(1);
        sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
-       sbtmstatelow &= ~0xa0000;
-       sbtmstatelow |= 0x80000;
+       sbtmstatelow &= 0xFFF5FFFF;
+       sbtmstatelow |= 0x00080000;
        bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
        udelay(1);
-
-       if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_G) {
-               old_core = bcm->current_core;
-               err = bcm43xx_switch_core(bcm, active_80211_core);
-               if (err)
-                       goto out;
-               sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
-               sbtmstatelow &= ~0x20000000;
-               sbtmstatelow |= 0x20000000;
-               bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
-               err = bcm43xx_switch_core(bcm, old_core);
-       }
-
-out:
-       return err;
 }
 
 static void handle_irq_transmit_status(struct bcm43xx_private *bcm)
@@ -1581,17 +1546,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm)
                else
                        average -= 48;
 
-/* FIXME: This is wrong, but people want fancy stats. well... */
-bcm->stats.noise = average;
-               if (average > -65)
-                       bcm->stats.link_quality = 0;
-               else if (average > -75)
-                       bcm->stats.link_quality = 1;
-               else if (average > -85)
-                       bcm->stats.link_quality = 2;
-               else
-                       bcm->stats.link_quality = 3;
-//             dprintk(KERN_INFO PFX "Link Quality: %u (avg was %d)\n", bcm->stats.link_quality, average);
+               bcm->stats.noise = average;
 drop_calculation:
                bcm->noisecalc.calculation_running = 0;
                return;
@@ -1709,8 +1664,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm)
 static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
 {
        u32 reason;
-       u32 dma_reason[4];
-       int activity = 0;
+       u32 dma_reason[6];
+       u32 merged_dma_reason = 0;
+       int i, activity = 0;
        unsigned long flags;
 
 #ifdef CONFIG_BCM43XX_DEBUG
@@ -1720,12 +1676,12 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
 # define bcmirq_handled(irq)   do { /* nothing */ } while (0)
 #endif /* CONFIG_BCM43XX_DEBUG*/
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        reason = bcm->irq_reason;
-       dma_reason[0] = bcm->dma_reason[0];
-       dma_reason[1] = bcm->dma_reason[1];
-       dma_reason[2] = bcm->dma_reason[2];
-       dma_reason[3] = bcm->dma_reason[3];
+       for (i = 5; i >= 0; i--) {
+               dma_reason[i] = bcm->dma_reason[i];
+               merged_dma_reason |= dma_reason[i];
+       }
 
        if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
                /* TX error. We get this when Template Ram is written in wrong endianess
@@ -1736,27 +1692,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
                printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
                bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
        }
-       if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) |
-                    (dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) |
-                    (dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) |
-                    (dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) {
+       if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
                printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
-                                    "0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+                                    "0x%08X, 0x%08X, 0x%08X, "
+                                    "0x%08X, 0x%08X, 0x%08X\n",
                        dma_reason[0], dma_reason[1],
-                       dma_reason[2], dma_reason[3]);
+                       dma_reason[2], dma_reason[3],
+                       dma_reason[4], dma_reason[5]);
                bcm43xx_controller_restart(bcm, "DMA error");
                mmiowb();
-               bcm43xx_unlock_irqonly(bcm, flags);
+               spin_unlock_irqrestore(&bcm->irq_lock, flags);
                return;
        }
-       if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) |
-                    (dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) |
-                    (dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) |
-                    (dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) {
+       if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
                printkl(KERN_ERR PFX "DMA error: "
-                                    "0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
+                                    "0x%08X, 0x%08X, 0x%08X, "
+                                    "0x%08X, 0x%08X, 0x%08X\n",
                        dma_reason[0], dma_reason[1],
-                       dma_reason[2], dma_reason[3]);
+                       dma_reason[2], dma_reason[3],
+                       dma_reason[4], dma_reason[5]);
        }
 
        if (reason & BCM43xx_IRQ_PS) {
@@ -1791,8 +1745,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
        }
 
        /* Check the DMA reason registers for received data. */
-       assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
-       assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
        if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
                if (bcm43xx_using_pio(bcm))
                        bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
@@ -1800,13 +1752,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
                        bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
                /* We intentionally don't set "activity" to 1, here. */
        }
+       assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
+       assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
        if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
                if (bcm43xx_using_pio(bcm))
                        bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
                else
-                       bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1);
+                       bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
                activity = 1;
        }
+       assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
+       assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
        bcmirq_handled(BCM43xx_IRQ_RX);
 
        if (reason & BCM43xx_IRQ_XMIT_STATUS) {
@@ -1834,7 +1790,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
                bcm43xx_leds_update(bcm, activity);
        bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
        mmiowb();
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
 }
 
 static void pio_irq_workaround(struct bcm43xx_private *bcm,
@@ -1863,14 +1819,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
 
        bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
 
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
                        bcm->dma_reason[0]);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
                        bcm->dma_reason[1]);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
                        bcm->dma_reason[2]);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
                        bcm->dma_reason[3]);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
+                       bcm->dma_reason[4]);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
+                       bcm->dma_reason[5]);
 }
 
 /* Interrupt handler top-half */
@@ -1885,14 +1845,8 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
 
        spin_lock(&bcm->irq_lock);
 
-       /* Only accept IRQs, if we are initialized properly.
-        * This avoids an RX race while initializing.
-        * We should probably not enable IRQs before we are initialized
-        * completely, but some careful work is needed to fix this. I think it
-        * is best to stay with this cheap workaround for now... .
-        */
-       if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED))
-               goto out;
+       assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
+       assert(bcm->current_core->id == BCM43xx_COREID_80211);
 
        reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
        if (reason == 0xffffffff) {
@@ -1904,14 +1858,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
        if (!reason)
                goto out;
 
-       bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
-                            & 0x0001dc00;
-       bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
-                            & 0x0000dc00;
-       bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
-                            & 0x0000dc00;
-       bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
-                            & 0x0001dc00;
+       bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
+                            & 0x0001DC00;
+       bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
+                            & 0x0000DC00;
+       bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
+                            & 0x0000DC00;
+       bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
+                            & 0x0001DC00;
+       bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
+                            & 0x0000DC00;
+       bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
+                            & 0x0000DC00;
 
        bcm43xx_interrupt_ack(bcm, reason);
 
@@ -1930,16 +1888,18 @@ out:
 
 static void bcm43xx_release_firmware(struct bcm43xx_private *bcm, int force)
 {
+       struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
+
        if (bcm->firmware_norelease && !force)
                return; /* Suspending or controller reset. */
-       release_firmware(bcm->ucode);
-       bcm->ucode = NULL;
-       release_firmware(bcm->pcm);
-       bcm->pcm = NULL;
-       release_firmware(bcm->initvals0);
-       bcm->initvals0 = NULL;
-       release_firmware(bcm->initvals1);
-       bcm->initvals1 = NULL;
+       release_firmware(phy->ucode);
+       phy->ucode = NULL;
+       release_firmware(phy->pcm);
+       phy->pcm = NULL;
+       release_firmware(phy->initvals0);
+       phy->initvals0 = NULL;
+       release_firmware(phy->initvals1);
+       phy->initvals1 = NULL;
 }
 
 static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
@@ -1950,11 +1910,11 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
        int nr;
        char buf[22 + sizeof(modparam_fwpostfix) - 1] = { 0 };
 
-       if (!bcm->ucode) {
+       if (!phy->ucode) {
                snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_microcode%d%s.fw",
                         (rev >= 5 ? 5 : rev),
                         modparam_fwpostfix);
-               err = request_firmware(&bcm->ucode, buf, &bcm->pci_dev->dev);
+               err = request_firmware(&phy->ucode, buf, &bcm->pci_dev->dev);
                if (err) {
                        printk(KERN_ERR PFX 
                               "Error: Microcode \"%s\" not available or load failed.\n",
@@ -1963,12 +1923,12 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
                }
        }
 
-       if (!bcm->pcm) {
+       if (!phy->pcm) {
                snprintf(buf, ARRAY_SIZE(buf),
                         "bcm43xx_pcm%d%s.fw",
                         (rev < 5 ? 4 : 5),
                         modparam_fwpostfix);
-               err = request_firmware(&bcm->pcm, buf, &bcm->pci_dev->dev);
+               err = request_firmware(&phy->pcm, buf, &bcm->pci_dev->dev);
                if (err) {
                        printk(KERN_ERR PFX
                               "Error: PCM \"%s\" not available or load failed.\n",
@@ -1977,7 +1937,7 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
                }
        }
 
-       if (!bcm->initvals0) {
+       if (!phy->initvals0) {
                if (rev == 2 || rev == 4) {
                        switch (phy->type) {
                        case BCM43xx_PHYTYPE_A:
@@ -2008,20 +1968,20 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
                snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
                         nr, modparam_fwpostfix);
 
-               err = request_firmware(&bcm->initvals0, buf, &bcm->pci_dev->dev);
+               err = request_firmware(&phy->initvals0, buf, &bcm->pci_dev->dev);
                if (err) {
                        printk(KERN_ERR PFX 
                               "Error: InitVals \"%s\" not available or load failed.\n",
                                buf);
                        goto error;
                }
-               if (bcm->initvals0->size % sizeof(struct bcm43xx_initval)) {
+               if (phy->initvals0->size % sizeof(struct bcm43xx_initval)) {
                        printk(KERN_ERR PFX "InitVals fileformat error.\n");
                        goto error;
                }
        }
 
-       if (!bcm->initvals1) {
+       if (!phy->initvals1) {
                if (rev >= 5) {
                        u32 sbtmstatehigh;
 
@@ -2043,14 +2003,14 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
                        snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
                                 nr, modparam_fwpostfix);
 
-                       err = request_firmware(&bcm->initvals1, buf, &bcm->pci_dev->dev);
+                       err = request_firmware(&phy->initvals1, buf, &bcm->pci_dev->dev);
                        if (err) {
                                printk(KERN_ERR PFX 
                                       "Error: InitVals \"%s\" not available or load failed.\n",
                                        buf);
                                goto error;
                        }
-                       if (bcm->initvals1->size % sizeof(struct bcm43xx_initval)) {
+                       if (phy->initvals1->size % sizeof(struct bcm43xx_initval)) {
                                printk(KERN_ERR PFX "InitVals fileformat error.\n");
                                goto error;
                        }
@@ -2070,12 +2030,13 @@ err_noinitval:
 
 static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm)
 {
+       struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        const u32 *data;
        unsigned int i, len;
 
        /* Upload Microcode. */
-       data = (u32 *)(bcm->ucode->data);
-       len = bcm->ucode->size / sizeof(u32);
+       data = (u32 *)(phy->ucode->data);
+       len = phy->ucode->size / sizeof(u32);
        bcm43xx_shm_control_word(bcm, BCM43xx_SHM_UCODE, 0x0000);
        for (i = 0; i < len; i++) {
                bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA,
@@ -2084,8 +2045,8 @@ static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm)
        }
 
        /* Upload PCM data. */
-       data = (u32 *)(bcm->pcm->data);
-       len = bcm->pcm->size / sizeof(u32);
+       data = (u32 *)(phy->pcm->data);
+       len = phy->pcm->size / sizeof(u32);
        bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01ea);
        bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, 0x00004000);
        bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01eb);
@@ -2131,15 +2092,16 @@ err_format:
 
 static int bcm43xx_upload_initvals(struct bcm43xx_private *bcm)
 {
+       struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        int err;
 
-       err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)bcm->initvals0->data,
-                                    bcm->initvals0->size / sizeof(struct bcm43xx_initval));
+       err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals0->data,
+                                    phy->initvals0->size / sizeof(struct bcm43xx_initval));
        if (err)
                goto out;
-       if (bcm->initvals1) {
-               err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)bcm->initvals1->data,
-                                            bcm->initvals1->size / sizeof(struct bcm43xx_initval));
+       if (phy->initvals1) {
+               err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals1->data,
+                                            phy->initvals1->size / sizeof(struct bcm43xx_initval));
                if (err)
                        goto out;
        }
@@ -2156,9 +2118,7 @@ static struct pci_device_id bcm43xx_47xx_ids[] = {
 
 static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
 {
-       int res;
-       unsigned int i;
-       u32 data;
+       int err;
 
        bcm->irq = bcm->pci_dev->irq;
 #ifdef CONFIG_BCM947XX
@@ -2175,32 +2135,12 @@ static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
                }
        }
 #endif
-       res = request_irq(bcm->irq, bcm43xx_interrupt_handler,
+       err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
                          IRQF_SHARED, KBUILD_MODNAME, bcm);
-       if (res) {
+       if (err)
                printk(KERN_ERR PFX "Cannot register IRQ%d\n", bcm->irq);
-               return -ENODEV;
-       }
-       bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0xffffffff);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 0x00020402);
-       i = 0;
-       while (1) {
-               data = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
-               if (data == BCM43xx_IRQ_READY)
-                       break;
-               i++;
-               if (i >= BCM43xx_IRQWAIT_MAX_RETRIES) {
-                       printk(KERN_ERR PFX "Card IRQ register not responding. "
-                                           "Giving up.\n");
-                       free_irq(bcm->irq, bcm);
-                       return -ENODEV;
-               }
-               udelay(10);
-       }
-       // dummy read
-       bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
 
-       return 0;
+       return err;
 }
 
 /* Switch to the core used to write the GPIO register.
@@ -2298,13 +2238,17 @@ static int bcm43xx_gpio_cleanup(struct bcm43xx_private *bcm)
 /* http://bcm-specs.sipsolutions.net/EnableMac */
 void bcm43xx_mac_enable(struct bcm43xx_private *bcm)
 {
-       bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
-                       bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
-                       | BCM43xx_SBF_MAC_ENABLED);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, BCM43xx_IRQ_READY);
-       bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
-       bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
-       bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
+       bcm->mac_suspended--;
+       assert(bcm->mac_suspended >= 0);
+       if (bcm->mac_suspended == 0) {
+               bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
+                               bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
+                               | BCM43xx_SBF_MAC_ENABLED);
+               bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, BCM43xx_IRQ_READY);
+               bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
+               bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
+               bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
+       }
 }
 
 /* http://bcm-specs.sipsolutions.net/SuspendMAC */
@@ -2313,18 +2257,23 @@ void bcm43xx_mac_suspend(struct bcm43xx_private *bcm)
        int i;
        u32 tmp;
 
-       bcm43xx_power_saving_ctl_bits(bcm, -1, 1);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
-                       bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
-                       & ~BCM43xx_SBF_MAC_ENABLED);
-       bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
-       for (i = 100000; i; i--) {
-               tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
-               if (tmp & BCM43xx_IRQ_READY)
-                       return;
-               udelay(10);
+       assert(bcm->mac_suspended >= 0);
+       if (bcm->mac_suspended == 0) {
+               bcm43xx_power_saving_ctl_bits(bcm, -1, 1);
+               bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
+                               bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
+                               & ~BCM43xx_SBF_MAC_ENABLED);
+               bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
+               for (i = 10000; i; i--) {
+                       tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
+                       if (tmp & BCM43xx_IRQ_READY)
+                               goto out;
+                       udelay(1);
+               }
+               printkl(KERN_ERR PFX "MAC suspend failed\n");
        }
-       printkl(KERN_ERR PFX "MAC suspend failed\n");
+out:
+       bcm->mac_suspended++;
 }
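
These two hunks turn bcm->mac_suspended into a nesting counter: bcm43xx_mac_suspend() only touches the hardware on the outermost call (0 -> 1) and bcm43xx_mac_enable() only re-enables it when the last suspender is gone (1 -> 0), so suspend/enable pairs may nest. A minimal standalone sketch of the same counting idiom, with the register accesses replaced by printf():

/* Sketch of the suspend/enable nesting counter (not driver code). */
#include <assert.h>
#include <stdio.h>

static int mac_suspended = 1;       /* starts suspended, as after init */

static void mac_suspend(void)
{
    assert(mac_suspended >= 0);
    if (mac_suspended == 0)
        printf("HW: stop MAC, wait for IRQ_READY\n");
    mac_suspended++;
}

static void mac_enable(void)
{
    mac_suspended--;
    assert(mac_suspended >= 0);
    if (mac_suspended == 0)
        printf("HW: start MAC\n");
}

int main(void)
{
    mac_enable();                   /* 1 -> 0: hardware really starts */
    mac_suspend();                  /* 0 -> 1: hardware really stops */
    mac_suspend();                  /* nested: no hardware access */
    mac_enable();                   /* still suspended */
    mac_enable();                   /* 1 -> 0: hardware starts again */
    return 0;
}
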
 
 void bcm43xx_set_iwmode(struct bcm43xx_private *bcm,
@@ -2394,7 +2343,6 @@ static void bcm43xx_chip_cleanup(struct bcm43xx_private *bcm)
        if (!modparam_noleds)
                bcm43xx_leds_exit(bcm);
        bcm43xx_gpio_cleanup(bcm);
-       free_irq(bcm->irq, bcm);
        bcm43xx_release_firmware(bcm, 0);
 }
 
@@ -2406,7 +2354,7 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
        struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        int err;
-       int tmp;
+       int i, tmp;
        u32 value32;
        u16 value16;
 
@@ -2419,13 +2367,53 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
                goto out;
        bcm43xx_upload_microcode(bcm);
 
-       err = bcm43xx_initialize_irq(bcm);
-       if (err)
+       bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0xFFFFFFFF);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 0x00020402);
+       i = 0;
+       while (1) {
+               value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
+               if (value32 == BCM43xx_IRQ_READY)
+                       break;
+               i++;
+               if (i >= BCM43xx_IRQWAIT_MAX_RETRIES) {
+                       printk(KERN_ERR PFX "IRQ_READY timeout\n");
+                       err = -ENODEV;
+                       goto err_release_fw;
+               }
+               udelay(10);
+       }
+       bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
+
+       value16 = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                    BCM43xx_UCODE_REVISION);
+
+       dprintk(KERN_INFO PFX "Microcode rev 0x%x, pl 0x%x "
+               "(20%.2i-%.2i-%.2i  %.2i:%.2i:%.2i)\n", value16,
+               bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                  BCM43xx_UCODE_PATCHLEVEL),
+               (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                   BCM43xx_UCODE_DATE) >> 12) & 0xf,
+               (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                   BCM43xx_UCODE_DATE) >> 8) & 0xf,
+               bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                  BCM43xx_UCODE_DATE) & 0xff,
+               (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                  BCM43xx_UCODE_TIME) >> 11) & 0x1f,
+               (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                  BCM43xx_UCODE_TIME) >> 5) & 0x3f,
+               bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                  BCM43xx_UCODE_TIME) & 0x1f);
+
+       if (value16 > 0x128) {
+               dprintk(KERN_ERR PFX
+                       "Firmware: no support for microcode rev > 0x128\n");
+               err = -1;
                goto err_release_fw;
+       }
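
The new dprintk unpacks the 16-bit BCM43xx_UCODE_DATE and BCM43xx_UCODE_TIME words field by field. The shifts and masks below are copied from the hunk; the sample values and the field names in the comments are only inferred from the print format:

/* Sketch: decode the packed microcode date/time words. */
#include <stdio.h>

int main(void)
{
    unsigned date = 0x6810;         /* hypothetical register value */
    unsigned time = 0x5985;         /* hypothetical register value */

    printf("20%02u-%02u-%02u  %02u:%02u:%02u\n",
           (date >> 12) & 0xf,      /* year minus 2000 */
           (date >> 8) & 0xf,       /* month */
           date & 0xff,             /* day */
           (time >> 11) & 0x1f,     /* hours */
           (time >> 5) & 0x3f,      /* minutes */
           time & 0x1f);            /* seconds */
    return 0;                       /* prints 2006-08-16  11:12:05 */
}
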
 
        err = bcm43xx_gpio_init(bcm);
        if (err)
-               goto err_free_irq;
+               goto err_release_fw;
 
        err = bcm43xx_upload_initvals(bcm);
        if (err)
@@ -2489,10 +2477,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
                bcm43xx_write32(bcm, 0x018C, 0x02000000);
        }
        bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
        bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00);
-       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
+       bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
 
        value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
        value32 |= 0x00100000;
@@ -2509,8 +2499,6 @@ err_radio_off:
        bcm43xx_radio_turn_off(bcm);
 err_gpio_cleanup:
        bcm43xx_gpio_cleanup(bcm);
-err_free_irq:
-       free_irq(bcm->irq, bcm);
 err_release_fw:
        bcm43xx_release_firmware(bcm, 1);
        goto out;
@@ -2550,11 +2538,9 @@ static void bcm43xx_init_struct_phyinfo(struct bcm43xx_phyinfo *phy)
 {
        /* Initialize a "phyinfo" structure. The structure is already
         * zeroed out.
+        * This is called at insmod time to initialize members.
         */
-       phy->antenna_diversity = 0xFFFF;
        phy->savedpctlreg = 0xFFFF;
-       phy->minlowsig[0] = 0xFFFF;
-       phy->minlowsig[1] = 0xFFFF;
        spin_lock_init(&phy->lock);
 }
 
@@ -2562,14 +2548,11 @@ static void bcm43xx_init_struct_radioinfo(struct bcm43xx_radioinfo *radio)
 {
        /* Initialize a "radioinfo" structure. The structure is already
         * zeroed out.
+        * This is called at insmod time to initialize members.
         */
        radio->interfmode = BCM43xx_RADIO_INTERFMODE_NONE;
        radio->channel = 0xFF;
        radio->initial_channel = 0xFF;
-       radio->lofcal = 0xFFFF;
-       radio->initval = 0xFFFF;
-       radio->nrssi[0] = -1000;
-       radio->nrssi[1] = -1000;
 }
 
 static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
@@ -2587,7 +2570,6 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
                                    * BCM43xx_MAX_80211_CORES);
        memset(&bcm->core_80211_ext, 0, sizeof(struct bcm43xx_coreinfo_80211)
                                        * BCM43xx_MAX_80211_CORES);
-       bcm->current_80211_core_idx = -1;
        bcm->nr_80211_available = 0;
        bcm->current_core = NULL;
        bcm->active_80211_core = NULL;
@@ -2757,6 +2739,7 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
                                goto out;
                        }
                        bcm->nr_80211_available++;
+                       core->priv = ext_80211;
                        bcm43xx_init_struct_phyinfo(&ext_80211->phy);
                        bcm43xx_init_struct_radioinfo(&ext_80211->radio);
                        break;
@@ -2857,7 +2840,8 @@ static void bcm43xx_wireless_core_cleanup(struct bcm43xx_private *bcm)
 }
 
 /* http://bcm-specs.sipsolutions.net/80211Init */
-static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm)
+static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm,
+                                     int active_wlcore)
 {
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
@@ -2939,19 +2923,26 @@ static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm)
        if (bcm->current_core->rev >= 5)
                bcm43xx_write16(bcm, 0x043C, 0x000C);
 
-       if (bcm43xx_using_pio(bcm))
-               err = bcm43xx_pio_init(bcm);
-       else
-               err = bcm43xx_dma_init(bcm);
-       if (err)
-               goto err_chip_cleanup;
+       if (active_wlcore) {
+               if (bcm43xx_using_pio(bcm))
+                       err = bcm43xx_pio_init(bcm);
+               else
+                       err = bcm43xx_dma_init(bcm);
+               if (err)
+                       goto err_chip_cleanup;
+       }
        bcm43xx_write16(bcm, 0x0612, 0x0050);
        bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0416, 0x0050);
        bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0414, 0x01F4);
 
-       bcm43xx_mac_enable(bcm);
-       bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
+       if (active_wlcore) {
+               if (radio->initial_channel != 0xFF)
+                       bcm43xx_radio_selectchannel(bcm, radio->initial_channel, 0);
+       }
 
+       /* Don't enable MAC/IRQ here, as it will race with the IRQ handler.
+        * We enable it later.
+        */
        bcm->current_core->initialized = 1;
 out:
        return err;
@@ -3066,11 +3057,6 @@ out:
        return err;
 }
 
-static void bcm43xx_softmac_init(struct bcm43xx_private *bcm)
-{
-       ieee80211softmac_start(bcm->net_dev);
-}
-
 static void bcm43xx_periodic_every120sec(struct bcm43xx_private *bcm)
 {
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
@@ -3182,47 +3168,46 @@ static void bcm43xx_periodic_work_handler(void *d)
                /* Periodic work will take a long time, so we want it to
                 * be preemptible.
                 */
-               bcm43xx_lock_irqonly(bcm, flags);
+               mutex_lock(&bcm->mutex);
                netif_stop_queue(bcm->net_dev);
+               synchronize_net();
+               spin_lock_irqsave(&bcm->irq_lock, flags);
+               bcm43xx_mac_suspend(bcm);
                if (bcm43xx_using_pio(bcm))
                        bcm43xx_pio_freeze_txqueues(bcm);
                savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-               bcm43xx_unlock_irqonly(bcm, flags);
-               bcm43xx_lock_noirq(bcm);
+               spin_unlock_irqrestore(&bcm->irq_lock, flags);
                bcm43xx_synchronize_irq(bcm);
        } else {
                /* Periodic work should take short time, so we want low
                 * locking overhead.
                 */
-               bcm43xx_lock_irqsafe(bcm, flags);
+               mutex_lock(&bcm->mutex);
+               spin_lock_irqsave(&bcm->irq_lock, flags);
        }
 
        do_periodic_work(bcm);
 
        if (badness > BADNESS_LIMIT) {
-               bcm43xx_lock_irqonly(bcm, flags);
-               if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
-                       tasklet_enable(&bcm->isr_tasklet);
-                       bcm43xx_interrupt_enable(bcm, savedirqs);
-                       if (bcm43xx_using_pio(bcm))
-                               bcm43xx_pio_thaw_txqueues(bcm);
-               }
+               spin_lock_irqsave(&bcm->irq_lock, flags);
+               tasklet_enable(&bcm->isr_tasklet);
+               bcm43xx_interrupt_enable(bcm, savedirqs);
+               if (bcm43xx_using_pio(bcm))
+                       bcm43xx_pio_thaw_txqueues(bcm);
+               bcm43xx_mac_enable(bcm);
                netif_wake_queue(bcm->net_dev);
-               mmiowb();
-               bcm43xx_unlock_irqonly(bcm, flags);
-               bcm43xx_unlock_noirq(bcm);
-       } else {
-               mmiowb();
-               bcm43xx_unlock_irqsafe(bcm, flags);
        }
+       mmiowb();
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 }
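
The periodic work handler now takes bcm->mutex for the whole (sleepable) work item and bcm->irq_lock only around the short sections shared with the interrupt handler, replacing the old bcm43xx_lock_* wrappers. A self-contained pthreads sketch of that outer-sleepable-lock / inner-short-lock split; the names are invented and pthread_spin_lock() merely stands in for the kernel's IRQ-disabling spinlock:

/* Sketch of two-level locking: a mutex for long, possibly sleeping
 * work, a spinlock for the small state shared with the "IRQ" path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t irq_lock;
static int shared_irq_state;

static void periodic_work(void)
{
    pthread_mutex_lock(&big_mutex);     /* held across the whole item */

    pthread_spin_lock(&irq_lock);       /* short, IRQ-shared section */
    shared_irq_state++;
    pthread_spin_unlock(&irq_lock);

    /* ... long, preemptible work would run here ... */

    pthread_mutex_unlock(&big_mutex);
}

int main(void)
{
    pthread_spin_init(&irq_lock, PTHREAD_PROCESS_PRIVATE);
    periodic_work();
    printf("state = %d\n", shared_irq_state);
    pthread_spin_destroy(&irq_lock);
    return 0;
}
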
 
-static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
+void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
 {
        cancel_rearming_delayed_work(&bcm->periodic_work);
 }
 
-static void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
+void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
 {
        struct work_struct *work = &(bcm->periodic_work);
 
@@ -3243,9 +3228,9 @@ static int bcm43xx_rng_read(struct hwrng *rng, u32 *data)
        struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv;
        unsigned long flags;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&(bcm)->irq_lock, flags);
        *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG);
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
 
        return (sizeof(u16));
 }
@@ -3271,139 +3256,329 @@ static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
        return err;
 }
 
-/* This is the opposite of bcm43xx_init_board() */
-static void bcm43xx_free_board(struct bcm43xx_private *bcm)
+static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm)
 {
+       int ret = 0;
        int i, err;
+       struct bcm43xx_coreinfo *core;
 
-       bcm43xx_lock_noirq(bcm);
+       bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
+       for (i = 0; i < bcm->nr_80211_available; i++) {
+               core = &(bcm->core_80211[i]);
+               assert(core->available);
+               if (!core->initialized)
+                       continue;
+               err = bcm43xx_switch_core(bcm, core);
+               if (err) {
+                       dprintk(KERN_ERR PFX "shutdown_all_wireless_cores "
+                                            "switch_core failed (%d)\n", err);
+                       ret = err;
+                       continue;
+               }
+               bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
+               bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
+               bcm43xx_wireless_core_cleanup(bcm);
+               if (core == bcm->active_80211_core)
+                       bcm->active_80211_core = NULL;
+       }
+       free_irq(bcm->irq, bcm);
+       bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
+
+       return ret;
+}
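
bcm43xx_shutdown_all_wireless_cores() walks every available core, skips the ones that were never initialized, keeps going when a core switch fails (remembering the error for the caller) and only frees the shared IRQ after the loop. A small sketch of that best-effort teardown loop; the core array and the failing switch function are stand-ins:

/* Sketch of a best-effort teardown loop (not driver code). */
#include <stdio.h>

struct core { int initialized; int id; };

static int switch_core(struct core *c)
{
    return (c->id == 1) ? -5 : 0;       /* pretend core 1 fails (-EIO) */
}

static int shutdown_all(struct core *cores, int n)
{
    int i, err, ret = 0;

    for (i = 0; i < n; i++) {
        if (!cores[i].initialized)
            continue;
        err = switch_core(&cores[i]);
        if (err) {
            ret = err;                  /* remember, but keep going */
            continue;
        }
        printf("core %d cleaned up\n", cores[i].id);
    }
    return ret;                         /* 0, or the last error seen */
}

int main(void)
{
    struct core cores[] = { {1, 0}, {1, 1}, {0, 2} };

    printf("shutdown_all -> %d\n", shutdown_all(cores, 3));
    return 0;
}
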
+
+/* This is the opposite of bcm43xx_init_board() */
+static void bcm43xx_free_board(struct bcm43xx_private *bcm)
+{
+       bcm43xx_rng_exit(bcm);
        bcm43xx_sysfs_unregister(bcm);
        bcm43xx_periodic_tasks_delete(bcm);
 
-       bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
+       mutex_lock(&(bcm)->mutex);
+       bcm43xx_shutdown_all_wireless_cores(bcm);
+       bcm43xx_pctl_set_crystal(bcm, 0);
+       mutex_unlock(&(bcm)->mutex);
+}
 
-       bcm43xx_rng_exit(bcm);
+static void prepare_phydata_for_init(struct bcm43xx_phyinfo *phy)
+{
+       phy->antenna_diversity = 0xFFFF;
+       memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
+       memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
+
+       /* Flags */
+       phy->calibrated = 0;
+       phy->is_locked = 0;
+
+       if (phy->_lo_pairs) {
+               memset(phy->_lo_pairs, 0,
+                      sizeof(struct bcm43xx_lopair) * BCM43xx_LO_COUNT);
+       }
+       memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain));
+}
+
+static void prepare_radiodata_for_init(struct bcm43xx_private *bcm,
+                                      struct bcm43xx_radioinfo *radio)
+{
+       int i;
+
+       /* Set default attenuation values. */
+       radio->baseband_atten = bcm43xx_default_baseband_attenuation(bcm);
+       radio->radio_atten = bcm43xx_default_radio_attenuation(bcm);
+       radio->txctl1 = bcm43xx_default_txctl1(bcm);
+       radio->txctl2 = 0xFFFF;
+       radio->txpwr_offset = 0;
+
+       /* NRSSI */
+       radio->nrssislope = 0;
+       for (i = 0; i < ARRAY_SIZE(radio->nrssi); i++)
+               radio->nrssi[i] = -1000;
+       for (i = 0; i < ARRAY_SIZE(radio->nrssi_lt); i++)
+               radio->nrssi_lt[i] = i;
+
+       radio->lofcal = 0xFFFF;
+       radio->initval = 0xFFFF;
+
+       radio->aci_enable = 0;
+       radio->aci_wlan_automatic = 0;
+       radio->aci_hw_rssi = 0;
+}
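
This split separates fields that are set once at insmod time (bcm43xx_init_struct_*) from fields that prepare_phydata_for_init()/prepare_radiodata_for_init() must re-seed before every core bringup, so a core can be shut down and re-initialized without stale calibration state. A tiny sketch of that one-time vs. per-bringup split; the field names are invented:

/* Sketch: one-time setup vs. per-bringup reset (not driver code). */
#include <string.h>
#include <stdio.h>

struct radio_state {
    int channel;                /* survives re-initialization */
    int nrssi[2];               /* recalibrated on every bringup */
    int lofcal;
};

static void init_at_load_time(struct radio_state *r)
{
    memset(r, 0, sizeof(*r));
    r->channel = 0xFF;          /* "no channel selected yet" */
}

static void prepare_for_init(struct radio_state *r)
{
    r->nrssi[0] = -1000;        /* force recalibration */
    r->nrssi[1] = -1000;
    r->lofcal = 0xFFFF;
}

int main(void)
{
    struct radio_state r;

    init_at_load_time(&r);      /* once, at module load */
    prepare_for_init(&r);       /* before every core bringup */
    printf("channel=%#x nrssi=%d lofcal=%#x\n",
           r.channel, r.nrssi[0], r.lofcal);
    return 0;
}
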
+
+static void prepare_priv_for_init(struct bcm43xx_private *bcm)
+{
+       int i;
+       struct bcm43xx_coreinfo *core;
+       struct bcm43xx_coreinfo_80211 *wlext;
+
+       assert(!bcm->active_80211_core);
+
+       bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
+
+       /* Flags */
+       bcm->was_initialized = 0;
+       bcm->reg124_set_0x4 = 0;
+
+       /* Stats */
+       memset(&bcm->stats, 0, sizeof(bcm->stats));
+
+       /* Wireless core data */
        for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
-               if (!bcm->core_80211[i].available)
-                       continue;
-               if (!bcm->core_80211[i].initialized)
+               core = &(bcm->core_80211[i]);
+               wlext = core->priv;
+
+               if (!core->available)
                        continue;
+               assert(wlext == &(bcm->core_80211_ext[i]));
 
-               err = bcm43xx_switch_core(bcm, &bcm->core_80211[i]);
-               assert(err == 0);
-               bcm43xx_wireless_core_cleanup(bcm);
+               prepare_phydata_for_init(&wlext->phy);
+               prepare_radiodata_for_init(bcm, &wlext->radio);
        }
 
-       bcm43xx_pctl_set_crystal(bcm, 0);
+       /* IRQ related flags */
+       bcm->irq_reason = 0;
+       memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason));
+       bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
 
-       bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
-       bcm43xx_unlock_noirq(bcm);
+       bcm->mac_suspended = 1;
+
+       /* Noise calculation context */
+       memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc));
+
+       /* Periodic work context */
+       bcm->periodic_state = 0;
 }
 
-static int bcm43xx_init_board(struct bcm43xx_private *bcm)
+static int wireless_core_up(struct bcm43xx_private *bcm,
+                           int active_wlcore)
+{
+       int err;
+
+       if (!bcm43xx_core_enabled(bcm))
+               bcm43xx_wireless_core_reset(bcm, 1);
+       if (!active_wlcore)
+               bcm43xx_wireless_core_mark_inactive(bcm);
+       err = bcm43xx_wireless_core_init(bcm, active_wlcore);
+       if (err)
+               goto out;
+       if (!active_wlcore)
+               bcm43xx_radio_turn_off(bcm);
+out:
+       return err;
+}
+
+/* Select and enable the "to be used" wireless core.
+ * Locking: bcm->mutex must be acquired before calling this.
+ *          bcm->irq_lock must not be acquired.
+ */
+int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
+                                int phytype)
 {
        int i, err;
-       int connect_phy;
+       struct bcm43xx_coreinfo *active_core = NULL;
+       struct bcm43xx_coreinfo_80211 *active_wlext = NULL;
+       struct bcm43xx_coreinfo *core;
+       struct bcm43xx_coreinfo_80211 *wlext;
+       int adjust_active_sbtmstatelow = 0;
 
        might_sleep();
 
-       bcm43xx_lock_noirq(bcm);
-       bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
+       if (phytype < 0) {
+               /* If no phytype is requested, select the first core. */
+               assert(bcm->core_80211[0].available);
+               wlext = bcm->core_80211[0].priv;
+               phytype = wlext->phy.type;
+       }
+       /* Find the requested core. */
+       for (i = 0; i < bcm->nr_80211_available; i++) {
+               core = &(bcm->core_80211[i]);
+               wlext = core->priv;
+               if (wlext->phy.type == phytype) {
+                       active_core = core;
+                       active_wlext = wlext;
+                       break;
+               }
+       }
+       if (!active_core)
+               return -ESRCH; /* No such PHYTYPE on this board. */
+
+       if (bcm->active_80211_core) {
+               /* We already selected a wl core in the past.
+                * So first clean up everything.
+                */
+               dprintk(KERN_INFO PFX "select_wireless_core: cleanup\n");
+               ieee80211softmac_stop(bcm->net_dev);
+               bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
+               err = bcm43xx_disable_interrupts_sync(bcm);
+               assert(!err);
+               tasklet_enable(&bcm->isr_tasklet);
+               err = bcm43xx_shutdown_all_wireless_cores(bcm);
+               if (err)
+                       goto error;
+               /* Ok, everything down, continue to re-initialize. */
+               bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
+       }
+
+       /* Reset all data structures. */
+       prepare_priv_for_init(bcm);
 
-       err = bcm43xx_pctl_set_crystal(bcm, 1);
-       if (err)
-               goto out;
-       err = bcm43xx_pctl_init(bcm);
-       if (err)
-               goto err_crystal_off;
        err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_FAST);
        if (err)
-               goto err_crystal_off;
+               goto error;
 
-       tasklet_enable(&bcm->isr_tasklet);
+       /* Mark all unused cores "inactive". */
        for (i = 0; i < bcm->nr_80211_available; i++) {
-               err = bcm43xx_switch_core(bcm, &bcm->core_80211[i]);
-               assert(err != -ENODEV);
-               if (err)
-                       goto err_80211_unwind;
+               core = &(bcm->core_80211[i]);
+               wlext = core->priv;
 
-               /* Enable the selected wireless core.
-                * Connect PHY only on the first core.
-                */
-               if (!bcm43xx_core_enabled(bcm)) {
-                       if (bcm->nr_80211_available == 1) {
-                               connect_phy = bcm43xx_current_phy(bcm)->connected;
-                       } else {
-                               if (i == 0)
-                                       connect_phy = 1;
-                               else
-                                       connect_phy = 0;
-                       }
-                       bcm43xx_wireless_core_reset(bcm, connect_phy);
+               if (core == active_core)
+                       continue;
+               err = bcm43xx_switch_core(bcm, core);
+               if (err) {
+                       dprintk(KERN_ERR PFX "Could not switch to inactive "
+                                            "802.11 core (%d)\n", err);
+                       goto error;
                }
+               err = wireless_core_up(bcm, 0);
+               if (err) {
+                       dprintk(KERN_ERR PFX "core_up for inactive 802.11 core "
+                                            "failed (%d)\n", err);
+                       goto error;
+               }
+               adjust_active_sbtmstatelow = 1;
+       }
 
-               if (i != 0)
-                       bcm43xx_wireless_core_mark_inactive(bcm, &bcm->core_80211[0]);
-
-               err = bcm43xx_wireless_core_init(bcm);
-               if (err)
-                       goto err_80211_unwind;
+       /* Now initialize the active 802.11 core. */
+       err = bcm43xx_switch_core(bcm, active_core);
+       if (err) {
+               dprintk(KERN_ERR PFX "Could not switch to active "
+                                    "802.11 core (%d)\n", err);
+               goto error;
+       }
+       if (adjust_active_sbtmstatelow &&
+           active_wlext->phy.type == BCM43xx_PHYTYPE_G) {
+               u32 sbtmstatelow;
 
-               if (i != 0) {
-                       bcm43xx_mac_suspend(bcm);
-                       bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-                       bcm43xx_radio_turn_off(bcm);
-               }
+               sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
+               sbtmstatelow |= 0x20000000;
+               bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
        }
-       bcm->active_80211_core = &bcm->core_80211[0];
-       if (bcm->nr_80211_available >= 2) {
-               bcm43xx_switch_core(bcm, &bcm->core_80211[0]);
-               bcm43xx_mac_enable(bcm);
+       err = wireless_core_up(bcm, 1);
+       if (err) {
+               dprintk(KERN_ERR PFX "core_up for active 802.11 core "
+                                    "failed (%d)\n", err);
+               goto error;
        }
-       err = bcm43xx_rng_init(bcm);
+       err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_DYNAMIC);
        if (err)
-               goto err_80211_unwind;
+               goto error;
+       bcm->active_80211_core = active_core;
+
        bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
        bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
-       dprintk(KERN_INFO PFX "80211 cores initialized\n");
        bcm43xx_security_init(bcm);
-       bcm43xx_softmac_init(bcm);
+       ieee80211softmac_start(bcm->net_dev);
 
-       bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_DYNAMIC);
+       /* Let's go! Be careful after enabling the IRQs.
+        * Don't switch cores, for example.
+        */
+       bcm43xx_mac_enable(bcm);
+       bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
+       err = bcm43xx_initialize_irq(bcm);
+       if (err)
+               goto error;
+       bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
 
-       if (bcm43xx_current_radio(bcm)->initial_channel != 0xFF) {
-               bcm43xx_mac_suspend(bcm);
-               bcm43xx_radio_selectchannel(bcm, bcm43xx_current_radio(bcm)->initial_channel, 0);
-               bcm43xx_mac_enable(bcm);
-       }
+       dprintk(KERN_INFO PFX "Selected 802.11 core (phytype %d)\n",
+               active_wlext->phy.type);
 
-       /* Initialization of the board is done. Flag it as such. */
-       bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
+       return 0;
+
+error:
+       bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
+       bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_SLOW);
+       return err;
+}
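
bcm43xx_select_wireless_core() resolves the requested PHY type (taking the first core when phytype is negative), scans the available cores for a match and returns -ESRCH when the board has no such PHY; when another core was already active it tears everything down before re-initializing. A compact sketch of just the lookup step; the table and the error constant are placeholders:

/* Sketch of the "find a core by PHY type" lookup (not driver code). */
#include <stdio.h>

enum { PHYTYPE_A, PHYTYPE_B, PHYTYPE_G };
#define NO_SUCH_PHY (-3)                /* stand-in for -ESRCH */

struct wlcore { int available; int phytype; };

static int select_core(const struct wlcore *cores, int n, int phytype)
{
    int i;

    if (phytype < 0)                    /* no preference: take core 0 */
        phytype = cores[0].phytype;
    for (i = 0; i < n; i++) {
        if (cores[i].available && cores[i].phytype == phytype)
            return i;                   /* index of the core to use */
    }
    return NO_SUCH_PHY;                 /* this board has no such PHY */
}

int main(void)
{
    const struct wlcore cores[] = { {1, PHYTYPE_B}, {1, PHYTYPE_G} };

    printf("G -> core %d\n", select_core(cores, 2, PHYTYPE_G));
    printf("A -> %d\n", select_core(cores, 2, PHYTYPE_A));
    return 0;
}
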
 
+static int bcm43xx_init_board(struct bcm43xx_private *bcm)
+{
+       int err;
+
+       mutex_lock(&(bcm)->mutex);
+
+       tasklet_enable(&bcm->isr_tasklet);
+       err = bcm43xx_pctl_set_crystal(bcm, 1);
+       if (err)
+               goto err_tasklet;
+       err = bcm43xx_pctl_init(bcm);
+       if (err)
+               goto err_crystal_off;
+       err = bcm43xx_select_wireless_core(bcm, -1);
+       if (err)
+               goto err_crystal_off;
+       err = bcm43xx_sysfs_register(bcm);
+       if (err)
+               goto err_wlshutdown;
+       err = bcm43xx_rng_init(bcm);
+       if (err)
+               goto err_sysfs_unreg;
        bcm43xx_periodic_tasks_setup(bcm);
-       bcm43xx_sysfs_register(bcm);
-       //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
 
        /*FIXME: This should be handled by softmac instead. */
        schedule_work(&bcm->softmac->associnfo.work);
 
-       assert(err == 0);
 out:
-       bcm43xx_unlock_noirq(bcm);
+       mutex_unlock(&(bcm)->mutex);
 
        return err;
 
-err_80211_unwind:
-       tasklet_disable(&bcm->isr_tasklet);
-       /* unwind all 80211 initialization */
-       for (i = 0; i < bcm->nr_80211_available; i++) {
-               if (!bcm->core_80211[i].initialized)
-                       continue;
-               bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-               bcm43xx_wireless_core_cleanup(bcm);
-       }
+err_sysfs_unreg:
+       bcm43xx_sysfs_unregister(bcm);
+err_wlshutdown:
+       bcm43xx_shutdown_all_wireless_cores(bcm);
 err_crystal_off:
        bcm43xx_pctl_set_crystal(bcm, 0);
+err_tasklet:
+       tasklet_disable(&bcm->isr_tasklet);
        goto out;
 }
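
The rewritten bcm43xx_init_board() uses the usual goto ladder: each step that succeeds pushes the failure path one label further down, so teardown always runs in exact reverse order of setup. A simplified standalone sketch of the idiom with invented step names:

/* Sketch of goto-ladder error unwinding (not driver code). */
#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("do:   %s\n", name);
    return fail ? -1 : 0;
}

static int init_board(int fail_at_rng)
{
    int err;

    err = step("crystal on", 0);
    if (err)
        goto out;
    err = step("select wireless core", 0);
    if (err)
        goto err_crystal_off;
    err = step("rng init", fail_at_rng);
    if (err)
        goto err_core_shutdown;
    return 0;

err_core_shutdown:
    printf("undo: shut down wireless cores\n");
err_crystal_off:
    printf("undo: crystal off\n");
out:
    return err;
}

int main(void)
{
    printf("-> %d\n", init_board(1));   /* fails late, unwinds in reverse */
    return 0;
}
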
 
@@ -3647,7 +3822,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
        struct bcm43xx_radioinfo *radio;
        unsigned long flags;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
                bcm43xx_mac_suspend(bcm);
                bcm43xx_radio_selectchannel(bcm, channel, 0);
@@ -3656,7 +3832,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
                radio = bcm43xx_current_radio(bcm);
                radio->initial_channel = channel;
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 }
 
 /* set_security() callback in struct ieee80211_device */
@@ -3670,7 +3847,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
        
        dprintk(KERN_INFO PFX "set security called");
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
 
        for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
                if (sec->flags & (1<<keyidx)) {
@@ -3739,7 +3917,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
                } else
                                bcm43xx_clear_keys(bcm);
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 }
 
 /* hard_start_xmit() callback in struct ieee80211_device */
@@ -3751,12 +3930,14 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
        int err = -ENODEV;
        unsigned long flags;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED))
                err = bcm43xx_tx(bcm, txb);
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
 
-       return err;
+       if (unlikely(err))
+               return NETDEV_TX_BUSY;
+       return NETDEV_TX_OK;
 }
 
 static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
@@ -3769,9 +3950,9 @@ static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        unsigned long flags;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        bcm43xx_controller_restart(bcm, "TX timeout");
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3781,7 +3962,8 @@ static void bcm43xx_net_poll_controller(struct net_device *net_dev)
        unsigned long flags;
 
        local_irq_save(flags);
-       bcm43xx_interrupt_handler(bcm->irq, bcm, NULL);
+       if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
+               bcm43xx_interrupt_handler(bcm->irq, bcm, NULL);
        local_irq_restore(flags);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -3799,9 +3981,10 @@ static int bcm43xx_net_stop(struct net_device *net_dev)
        int err;
 
        ieee80211softmac_stop(net_dev);
-       err = bcm43xx_disable_interrupts_sync(bcm, NULL);
+       err = bcm43xx_disable_interrupts_sync(bcm);
        assert(!err);
        bcm43xx_free_board(bcm);
+       flush_scheduled_work();
 
        return 0;
 }
@@ -3818,10 +4001,12 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
        bcm->softmac->set_channel = bcm43xx_ieee80211_set_chan;
 
        bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
+       bcm->mac_suspended = 1;
        bcm->pci_dev = pci_dev;
        bcm->net_dev = net_dev;
        bcm->bad_frames_preempt = modparam_bad_frames_preempt;
        spin_lock_init(&bcm->irq_lock);
+       spin_lock_init(&bcm->leds_lock);
        mutex_init(&bcm->mutex);
        tasklet_init(&bcm->isr_tasklet,
                     (void (*)(unsigned long))bcm43xx_interrupt_tasklet,
@@ -3940,7 +4125,6 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
        bcm43xx_debugfs_remove_device(bcm);
        unregister_netdev(net_dev);
        bcm43xx_detach_board(bcm);
-       assert(bcm->ucode == NULL);
        free_ieee80211softmac(net_dev);
 }
 
@@ -3950,47 +4134,31 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
 static void bcm43xx_chip_reset(void *_bcm)
 {
        struct bcm43xx_private *bcm = _bcm;
-       struct net_device *net_dev = bcm->net_dev;
-       struct pci_dev *pci_dev = bcm->pci_dev;
-       int err;
-       int was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-
-       netif_stop_queue(bcm->net_dev);
-       tasklet_disable(&bcm->isr_tasklet);
+       struct bcm43xx_phyinfo *phy;
+       int err = -ENODEV;
 
-       bcm->firmware_norelease = 1;
-       if (was_initialized)
-               bcm43xx_free_board(bcm);
-       bcm->firmware_norelease = 0;
-       bcm43xx_detach_board(bcm);
-       err = bcm43xx_init_private(bcm, net_dev, pci_dev);
-       if (err)
-               goto failure;
-       err = bcm43xx_attach_board(bcm);
-       if (err)
-               goto failure;
-       if (was_initialized) {
-               err = bcm43xx_init_board(bcm);
-               if (err)
-                       goto failure;
+       mutex_lock(&(bcm)->mutex);
+       if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
+               bcm43xx_periodic_tasks_delete(bcm);
+               phy = bcm43xx_current_phy(bcm);
+               err = bcm43xx_select_wireless_core(bcm, phy->type);
+               if (!err)
+                       bcm43xx_periodic_tasks_setup(bcm);
        }
-       netif_wake_queue(bcm->net_dev);
-       printk(KERN_INFO PFX "Controller restarted\n");
+       mutex_unlock(&(bcm)->mutex);
 
-       return;
-failure:
-       printk(KERN_ERR PFX "Controller restart failed\n");
+       printk(KERN_ERR PFX "Controller restart%s\n",
+              (err == 0) ? "ed" : " failed");
 }
 
 /* Hard-reset the chip.
  * This can be called from interrupt or process context.
- * Make sure to _not_ re-enable device interrupts after this has been called.
-*/
+ * bcm->irq_lock must be locked.
+ */
 void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
 {
-       bcm43xx_set_status(bcm, BCM43xx_STAT_RESTARTING);
-       bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
-       bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
+       if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
+               return;
        printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
        INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
        schedule_work(&bcm->restart_work);
@@ -4002,21 +4170,16 @@ static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *net_dev = pci_get_drvdata(pdev);
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
-       int try_to_shutdown = 0, err;
+       int err;
 
        dprintk(KERN_INFO PFX "Suspending...\n");
 
-       bcm43xx_lock_irqsafe(bcm, flags);
-       bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-       if (bcm->was_initialized)
-               try_to_shutdown = 1;
-       bcm43xx_unlock_irqsafe(bcm, flags);
-
        netif_device_detach(net_dev);
-       if (try_to_shutdown) {
+       bcm->was_initialized = 0;
+       if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
+               bcm->was_initialized = 1;
                ieee80211softmac_stop(net_dev);
-               err = bcm43xx_disable_interrupts_sync(bcm, &bcm->irq_savedstate);
+               err = bcm43xx_disable_interrupts_sync(bcm);
                if (unlikely(err)) {
                        dprintk(KERN_ERR PFX "Suspend failed.\n");
                        return -EAGAIN;
@@ -4049,17 +4212,14 @@ static int bcm43xx_resume(struct pci_dev *pdev)
        pci_restore_state(pdev);
 
        bcm43xx_chipset_attach(bcm);
-       if (bcm->was_initialized) {
-               bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
+       if (bcm->was_initialized)
                err = bcm43xx_init_board(bcm);
-       }
        if (err) {
                printk(KERN_ERR PFX "Resume failed!\n");
                return err;
        }
-
        netif_device_attach(net_dev);
-       
+
        dprintk(KERN_INFO PFX "Device resumed.\n");
 
        return 0;
index 1164936..f763571 100644 (file)
@@ -133,11 +133,17 @@ void bcm43xx_dummy_transmission(struct bcm43xx_private *bcm);
 
 int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *new_core);
 
+int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
+                                int phytype);
+
 void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy);
 
 void bcm43xx_mac_suspend(struct bcm43xx_private *bcm);
 void bcm43xx_mac_enable(struct bcm43xx_private *bcm);
 
+void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm);
+void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm);
+
 void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason);
 
 int bcm43xx_sprom_read(struct bcm43xx_private *bcm, u16 *sprom);
index f8200de..eafd0f6 100644 (file)
@@ -81,6 +81,16 @@ static const s8 bcm43xx_tssi2dbm_g_table[] = {
 static void bcm43xx_phy_initg(struct bcm43xx_private *bcm);
 
 
+static inline
+void bcm43xx_voluntary_preempt(void)
+{
+       assert(!in_atomic() && !in_irq() &&
+              !in_interrupt() && !irqs_disabled());
+#ifndef CONFIG_PREEMPT
+       cond_resched();
+#endif /* CONFIG_PREEMPT */
+}
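
bcm43xx_voluntary_preempt() asserts that it runs in sleepable context and, on kernels built without CONFIG_PREEMPT, calls cond_resched() so the long calibration loops below do not monopolize the CPU. The closest userspace analogue is an explicit yield point inside a long loop; a tiny sketch where sched_yield() stands in for cond_resched():

/* Userspace sketch of a voluntary yield point in a long loop. */
#include <sched.h>
#include <stdio.h>

static void voluntary_preempt(void)
{
    sched_yield();                  /* let other runnable tasks in */
}

int main(void)
{
    int i;
    long long acc = 0;

    for (i = 0; i < 1000000; i++) {
        acc += i;                   /* stand-in for one calibration step */
        if ((i % 10000) == 0)
            voluntary_preempt();
    }
    printf("%lld\n", acc);
    return 0;
}
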
+
 void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm)
 {
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
@@ -133,22 +143,14 @@ void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val)
 void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm)
 {
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
-       unsigned long flags;
 
        bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */
        if (phy->calibrated)
                return;
        if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) {
-               /* We do not want to be preempted while calibrating
-                * the hardware.
-                */
-               local_irq_save(flags);
-
                bcm43xx_wireless_core_reset(bcm, 0);
                bcm43xx_phy_initg(bcm);
                bcm43xx_wireless_core_reset(bcm, 1);
-
-               local_irq_restore(flags);
        }
        phy->calibrated = 1;
 }
@@ -1299,7 +1301,9 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
 {
        int i;
        u16 ret = 0;
+       unsigned long flags;
 
+       local_irq_save(flags);
        for (i = 0; i < 10; i++){
                bcm43xx_phy_write(bcm, 0x0015, 0xAFA0);
                udelay(1);
@@ -1309,6 +1313,8 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
                udelay(40);
                ret += bcm43xx_phy_read(bcm, 0x002C);
        }
+       local_irq_restore(flags);
+       bcm43xx_voluntary_preempt();
 
        return ret;
 }
@@ -1435,6 +1441,7 @@ u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
        }
        ret = bcm43xx_phy_read(bcm, 0x002D);
        local_irq_restore(flags);
+       bcm43xx_voluntary_preempt();
 
        return ret;
 }
@@ -1760,6 +1767,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
                        bcm43xx_radio_write16(bcm, 0x43, i);
                        bcm43xx_radio_write16(bcm, 0x52, radio->txctl2);
                        udelay(10);
+                       bcm43xx_voluntary_preempt();
 
                        bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
 
@@ -1803,6 +1811,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
                                              radio->txctl2
                                              | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above?
                        udelay(10);
+                       bcm43xx_voluntary_preempt();
 
                        bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
 
@@ -1824,6 +1833,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
                bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2);
                udelay(2);
                bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3);
+               bcm43xx_voluntary_preempt();
        } else
                bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0);
        bcm43xx_phy_lo_adjust(bcm, is_initializing);
@@ -2188,12 +2198,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
 {
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        int err = -ENODEV;
-       unsigned long flags;
-
-       /* We do not want to be preempted while calibrating
-        * the hardware.
-        */
-       local_irq_save(flags);
 
        switch (phy->type) {
        case BCM43xx_PHYTYPE_A:
@@ -2227,7 +2231,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
                err = 0;
                break;
        }
-       local_irq_restore(flags);
        if (err)
                printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n");
 
index 574085c..c60c174 100644 (file)
@@ -262,7 +262,7 @@ static void tx_tasklet(unsigned long d)
        int err;
        u16 txctl;
 
-       bcm43xx_lock_irqonly(bcm, flags);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
 
        if (queue->tx_frozen)
                goto out_unlock;
@@ -300,7 +300,7 @@ static void tx_tasklet(unsigned long d)
                continue;
        }
 out_unlock:
-       bcm43xx_unlock_irqonly(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
 }
 
 static void setup_txqueues(struct bcm43xx_pioqueue *queue)
index 6a23bdc..c71b998 100644 (file)
@@ -120,12 +120,14 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
                        GFP_KERNEL);
        if (!sprom)
                return -ENOMEM;
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        err = bcm43xx_sprom_read(bcm, sprom);
        if (!err)
                err = sprom2hex(sprom, buf, PAGE_SIZE);
        mmiowb();
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
        kfree(sprom);
 
        return err;
@@ -150,10 +152,14 @@ static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
        err = hex2sprom(sprom, buf, count);
        if (err)
                goto out_kfree;
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
+       spin_lock(&bcm->leds_lock);
        err = bcm43xx_sprom_write(bcm, sprom);
        mmiowb();
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock(&bcm->leds_lock);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 out_kfree:
        kfree(sprom);
 
@@ -170,13 +176,12 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
                                            char *buf)
 {
        struct bcm43xx_private *bcm = dev_to_bcm(dev);
-       int err;
        ssize_t count = 0;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       bcm43xx_lock_noirq(bcm);
+       mutex_lock(&bcm->mutex);
 
        switch (bcm43xx_current_radio(bcm)->interfmode) {
        case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -191,11 +196,10 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
        default:
                assert(0);
        }
-       err = 0;
 
-       bcm43xx_unlock_noirq(bcm);
+       mutex_unlock(&bcm->mutex);
 
-       return err ? err : count;
+       return count;
 
 }
 
@@ -229,7 +233,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
                return -EINVAL;
        }
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
 
        err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
        if (err) {
@@ -237,7 +242,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
                                    "supported by device\n");
        }
        mmiowb();
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err ? err : count;
 }
@@ -251,23 +257,21 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
                                          char *buf)
 {
        struct bcm43xx_private *bcm = dev_to_bcm(dev);
-       int err;
        ssize_t count;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       bcm43xx_lock_noirq(bcm);
+       mutex_lock(&bcm->mutex);
 
        if (bcm->short_preamble)
                count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
        else
                count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
 
-       err = 0;
-       bcm43xx_unlock_noirq(bcm);
+       mutex_unlock(&bcm->mutex);
 
-       return err ? err : count;
+       return count;
 }
 
 static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
@@ -276,7 +280,6 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
 {
        struct bcm43xx_private *bcm = dev_to_bcm(dev);
        unsigned long flags;
-       int err;
        int value;
 
        if (!capable(CAP_NET_ADMIN))
@@ -285,20 +288,141 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
        value = get_boolean(buf, count);
        if (value < 0)
                return value;
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
 
        bcm->short_preamble = !!value;
 
-       err = 0;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
-       return err ? err : count;
+       return count;
 }
 
 static DEVICE_ATTR(shortpreamble, 0644,
                   bcm43xx_attr_preamble_show,
                   bcm43xx_attr_preamble_store);
 
+static ssize_t bcm43xx_attr_phymode_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct bcm43xx_private *bcm = dev_to_bcm(dev);
+       int phytype;
+       int err = -EINVAL;
+
+       if (count < 1)
+               goto out;
+       switch (buf[0]) {
+       case 'a':  case 'A':
+               phytype = BCM43xx_PHYTYPE_A;
+               break;
+       case 'b':  case 'B':
+               phytype = BCM43xx_PHYTYPE_B;
+               break;
+       case 'g':  case 'G':
+               phytype = BCM43xx_PHYTYPE_G;
+               break;
+       default:
+               goto out;
+       }
+
+       bcm43xx_periodic_tasks_delete(bcm);
+       mutex_lock(&(bcm)->mutex);
+       err = bcm43xx_select_wireless_core(bcm, phytype);
+       if (!err)
+               bcm43xx_periodic_tasks_setup(bcm);
+       mutex_unlock(&(bcm)->mutex);
+       if (err == -ESRCH)
+               err = -ENODEV;
+
+out:
+       return err ? err : count;
+}
+
+static ssize_t bcm43xx_attr_phymode_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct bcm43xx_private *bcm = dev_to_bcm(dev);
+       ssize_t count = 0;
+
+       mutex_lock(&(bcm)->mutex);
+       switch (bcm43xx_current_phy(bcm)->type) {
+       case BCM43xx_PHYTYPE_A:
+               count = snprintf(buf, PAGE_SIZE, "A");
+               break;
+       case BCM43xx_PHYTYPE_B:
+               count = snprintf(buf, PAGE_SIZE, "B");
+               break;
+       case BCM43xx_PHYTYPE_G:
+               count = snprintf(buf, PAGE_SIZE, "G");
+               break;
+       default:
+               assert(0);
+       }
+       mutex_unlock(&(bcm)->mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR(phymode, 0644,
+                  bcm43xx_attr_phymode_show,
+                  bcm43xx_attr_phymode_store);
+
+static ssize_t bcm43xx_attr_microcode_show(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       unsigned long flags;
+       struct bcm43xx_private *bcm = dev_to_bcm(dev);
+       ssize_t count = 0;
+       u16 status;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       mutex_lock(&(bcm)->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
+       status = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
+                                   BCM43xx_UCODE_STATUS);
+
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&(bcm)->mutex);
+       switch (status) {
+       case 0x0000:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (invalid)\n",
+                                status);
+               break;
+       case 0x0001:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (init)\n",
+                                status);
+               break;
+       case 0x0002:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (active)\n",
+                                status);
+               break;
+       case 0x0003:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (suspended)\n",
+                                status);
+               break;
+       case 0x0004:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (asleep)\n",
+                                status);
+               break;
+       default:
+               count = snprintf(buf, PAGE_SIZE, "0x%.4x (unknown)\n",
+                                status);
+               break;
+       }
+
+       return count;
+}
+
+static DEVICE_ATTR(microcodestatus, 0444,
+                  bcm43xx_attr_microcode_show,
+                  NULL);
+
 int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
 {
        struct device *dev = &bcm->pci_dev->dev;
@@ -315,9 +439,19 @@ int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
        err = device_create_file(dev, &dev_attr_shortpreamble);
        if (err)
                goto err_remove_interfmode;
+       err = device_create_file(dev, &dev_attr_phymode);
+       if (err)
+               goto err_remove_shortpreamble;
+       err = device_create_file(dev, &dev_attr_microcodestatus);
+       if (err)
+               goto err_remove_phymode;
 
 out:
        return err;
+err_remove_phymode:
+       device_remove_file(dev, &dev_attr_phymode);
+err_remove_shortpreamble:
+       device_remove_file(dev, &dev_attr_shortpreamble);
 err_remove_interfmode:
        device_remove_file(dev, &dev_attr_interference);
 err_remove_sprom:
@@ -329,6 +463,8 @@ void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm)
 {
        struct device *dev = &bcm->pci_dev->dev;
 
+       device_remove_file(dev, &dev_attr_microcodestatus);
+       device_remove_file(dev, &dev_attr_phymode);
        device_remove_file(dev, &dev_attr_shortpreamble);
        device_remove_file(dev, &dev_attr_interference);
        device_remove_file(dev, &dev_attr_sprom);
index 5c36e29..888077f 100644 (file)
@@ -47,6 +47,8 @@
 #define BCM43xx_WX_VERSION     18
 
 #define MAX_WX_STRING          80
+/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
+#define RX_RSSI_MAX            60
 
 
 static int bcm43xx_wx_get_name(struct net_device *net_dev,
@@ -56,12 +58,11 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        int i;
-       unsigned long flags;
        struct bcm43xx_phyinfo *phy;
        char suffix[7] = { 0 };
        int have_a = 0, have_b = 0, have_g = 0;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        for (i = 0; i < bcm->nr_80211_available; i++) {
                phy = &(bcm->core_80211_ext[i].phy);
                switch (phy->type) {
@@ -77,7 +78,7 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
                        assert(0);
                }
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        i = 0;
        if (have_a) {
@@ -111,7 +112,9 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
        int freq;
        int err = -EINVAL;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
+
        if ((data->freq.m >= 0) && (data->freq.m <= 1000)) {
                channel = data->freq.m;
                freq = bcm43xx_channel_to_freq(bcm, channel);
@@ -131,7 +134,8 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
                err = 0;
        }
 out_unlock:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -143,11 +147,10 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        struct bcm43xx_radioinfo *radio;
-       unsigned long flags;
        int err = -ENODEV;
        u16 channel;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        radio = bcm43xx_current_radio(bcm);
        channel = radio->channel;
        if (channel == 0xFF) {
@@ -162,7 +165,7 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
 
        err = 0;
 out_unlock:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -180,13 +183,15 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
        if (mode == IW_MODE_AUTO)
                mode = BCM43xx_INITIAL_IWMODE;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
                if (bcm->ieee->iw_mode != mode)
                        bcm43xx_set_iwmode(bcm, mode);
        } else
                bcm->ieee->iw_mode = mode;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -197,11 +202,10 @@ static int bcm43xx_wx_get_mode(struct net_device *net_dev,
                               char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        data->mode = bcm->ieee->iw_mode;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -214,7 +218,6 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        struct iw_range *range = (struct iw_range *)extra;
        const struct ieee80211_geo *geo;
-       unsigned long flags;
        int i, j;
        struct bcm43xx_phyinfo *phy;
 
@@ -226,15 +229,14 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
        range->throughput = 27 * 1000 * 1000;
 
        range->max_qual.qual = 100;
-       /* TODO: Real max RSSI */
-       range->max_qual.level = 3;
-       range->max_qual.noise = 100;
-       range->max_qual.updated = 7;
+       range->max_qual.level = 146; /* set floor at -110 dBm (146 - 256) */
+       range->max_qual.noise = 146;
+       range->max_qual.updated = IW_QUAL_ALL_UPDATED;
 
-       range->avg_qual.qual = 70;
-       range->avg_qual.level = 2;
-       range->avg_qual.noise = 40;
-       range->avg_qual.updated = 7;
+       range->avg_qual.qual = 50;
+       range->avg_qual.level = 0;
+       range->avg_qual.noise = 0;
+       range->avg_qual.updated = IW_QUAL_ALL_UPDATED;
 
        range->min_rts = BCM43xx_MIN_RTS_THRESHOLD;
        range->max_rts = BCM43xx_MAX_RTS_THRESHOLD;
@@ -254,7 +256,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
                          IW_ENC_CAPA_CIPHER_TKIP |
                          IW_ENC_CAPA_CIPHER_CCMP;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        phy = bcm43xx_current_phy(bcm);
 
        range->num_bitrates = 0;
@@ -301,7 +303,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
        }
        range->num_frequency = j;
 
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -314,11 +316,11 @@ static int bcm43xx_wx_set_nick(struct net_device *net_dev,
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        size_t len;
 
-       bcm43xx_lock_noirq(bcm);
+       mutex_lock(&bcm->mutex);
        len =  min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE);
        memcpy(bcm->nick, extra, len);
        bcm->nick[len] = '\0';
-       bcm43xx_unlock_noirq(bcm);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -331,12 +333,12 @@ static int bcm43xx_wx_get_nick(struct net_device *net_dev,
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        size_t len;
 
-       bcm43xx_lock_noirq(bcm);
+       mutex_lock(&bcm->mutex);
        len = strlen(bcm->nick) + 1;
        memcpy(extra, bcm->nick, len);
        data->data.length = (__u16)len;
        data->data.flags = 1;
-       bcm43xx_unlock_noirq(bcm);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -350,7 +352,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
        unsigned long flags;
        int err = -EINVAL;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (data->rts.disabled) {
                bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD;
                err = 0;
@@ -361,7 +364,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
                        err = 0;
                }
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -372,13 +376,12 @@ static int bcm43xx_wx_get_rts(struct net_device *net_dev,
                              char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        data->rts.value = bcm->rts_threshold;
        data->rts.fixed = 0;
        data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD);
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -392,7 +395,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
        unsigned long flags;
        int err = -EINVAL;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (data->frag.disabled) {
                bcm->ieee->fts = MAX_FRAG_THRESHOLD;
                err = 0;
@@ -403,7 +407,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
                        err = 0;
                }
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -414,13 +419,12 @@ static int bcm43xx_wx_get_frag(struct net_device *net_dev,
                               char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        data->frag.value = bcm->ieee->fts;
        data->frag.fixed = 0;
        data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD);
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -442,7 +446,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
                return -EOPNOTSUPP;
        }
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
                goto out_unlock;
        radio = bcm43xx_current_radio(bcm);
@@ -466,7 +471,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
        err = 0;
 
 out_unlock:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -478,10 +484,9 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        struct bcm43xx_radioinfo *radio;
-       unsigned long flags;
        int err = -ENODEV;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
                goto out_unlock;
        radio = bcm43xx_current_radio(bcm);
@@ -493,7 +498,7 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
 
        err = 0;
 out_unlock:
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -580,7 +585,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
                return -EINVAL;
        }
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
                err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
                if (err) {
@@ -595,7 +601,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
                } else
                        bcm43xx_current_radio(bcm)->interfmode = mode;
        }
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return err;
 }
@@ -606,12 +613,11 @@ static int bcm43xx_wx_get_interfmode(struct net_device *net_dev,
                                     char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
        int mode;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        mode = bcm43xx_current_radio(bcm)->interfmode;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        switch (mode) {
        case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -641,9 +647,11 @@ static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev,
        int on;
 
        on = *((int *)extra);
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        bcm->short_preamble = !!on;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -654,12 +662,11 @@ static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev,
                                        char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
        int on;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        on = bcm->short_preamble;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        if (on)
                strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING);
@@ -681,11 +688,13 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
        
        on = *((int *)extra);
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        bcm->ieee->host_encrypt = !!on;
        bcm->ieee->host_decrypt = !!on;
        bcm->ieee->host_build_iv = !on;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 
        return 0;
 }
@@ -696,12 +705,11 @@ static int bcm43xx_wx_get_swencryption(struct net_device *net_dev,
                                       char *extra)
 {
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
-       unsigned long flags;
        int on;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
        on = bcm->ieee->host_encrypt;
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       mutex_unlock(&bcm->mutex);
 
        if (on)
                strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING);
@@ -764,11 +772,13 @@ static int bcm43xx_wx_sprom_read(struct net_device *net_dev,
        if (!sprom)
                goto out;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
        err = -ENODEV;
        if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
                err = bcm43xx_sprom_read(bcm, sprom);
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
        if (!err)
                data->data.length = sprom2hex(sprom, extra);
        kfree(sprom);
@@ -809,11 +819,15 @@ static int bcm43xx_wx_sprom_write(struct net_device *net_dev,
        if (err)
                goto out_kfree;
 
-       bcm43xx_lock_irqsafe(bcm, flags);
+       mutex_lock(&bcm->mutex);
+       spin_lock_irqsave(&bcm->irq_lock, flags);
+       spin_lock(&bcm->leds_lock);
        err = -ENODEV;
        if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
                err = bcm43xx_sprom_write(bcm, sprom);
-       bcm43xx_unlock_irqsafe(bcm, flags);
+       spin_unlock(&bcm->leds_lock);
+       spin_unlock_irqrestore(&bcm->irq_lock, flags);
+       mutex_unlock(&bcm->mutex);
 out_kfree:
        kfree(sprom);
 out:
@@ -827,6 +841,10 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
        struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
        struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
        struct iw_statistics *wstats;
+       struct ieee80211_network *network = NULL;
+       static int tmp_level = 0;
+       static int tmp_qual = 0;
+       unsigned long flags;
 
        wstats = &bcm->stats.wstats;
        if (!mac->associated) {
@@ -844,16 +862,28 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
                wstats->qual.level = 0;
                wstats->qual.noise = 0;
                wstats->qual.updated = 7;
-               wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
-                       IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
+               wstats->qual.updated |= IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
                return wstats;
        }
        /* fill in the real statistics when iface associated */
-       wstats->qual.qual = 100;     // TODO: get the real signal quality
-       wstats->qual.level = 3 - bcm->stats.link_quality;
+       spin_lock_irqsave(&mac->ieee->lock, flags);
+       list_for_each_entry(network, &mac->ieee->network_list, list) {
+               if (!memcmp(mac->associnfo.bssid, network->bssid, ETH_ALEN)) {
+                       if (!tmp_level) {       /* get initial values */
+                               tmp_level = network->stats.signal;
+                               tmp_qual = network->stats.rssi;
+                       } else {                /* smooth results */
+                               tmp_level = (15 * tmp_level + network->stats.signal)/16;
+                               tmp_qual = (15 * tmp_qual + network->stats.rssi)/16;
+                       }
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&mac->ieee->lock, flags);
+       wstats->qual.level = tmp_level;
+       wstats->qual.qual = 100 * tmp_qual / RX_RSSI_MAX;
        wstats->qual.noise = bcm->stats.noise;
-       wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
-                       IW_QUAL_NOISE_UPDATED;
+       wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
        wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable;
        wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded;
        wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa;
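
The new statistics path above keeps a running signal/RSSI estimate with a 15/16 exponential moving average, seeded from the first sample. A standalone illustration of that smoothing (plain C; the sample values are made up):

    #include <stdio.h>

    /* Same update rule as the hunk above: new_avg = (15 * old_avg + sample) / 16 */
    static int ewma16(int avg, int sample)
    {
            return (15 * avg + sample) / 16;
    }

    int main(void)
    {
            int samples[] = { -60, -62, -58, -61 };   /* made-up dBm readings */
            int level = 0;                            /* 0 doubles as "not seeded yet", as in the driver */
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    level = level ? ewma16(level, samples[i]) : samples[i];
                    printf("sample %d -> smoothed %d\n", samples[i], level);
            }
            return 0;
    }
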
index 6dbd855..c0efbfe 100644
@@ -492,16 +492,15 @@ int bcm43xx_rx(struct bcm43xx_private *bcm,
 
        memset(&stats, 0, sizeof(stats));
        stats.mac_time = le16_to_cpu(rxhdr->mactime);
-       stats.rssi = bcm43xx_rssi_postprocess(bcm, rxhdr->rssi, is_ofdm,
+       stats.rssi = rxhdr->rssi;
+       stats.signal = bcm43xx_rssi_postprocess(bcm, rxhdr->rssi, is_ofdm,
                                              !!(rxflags1 & BCM43xx_RXHDR_FLAGS1_2053RSSIADJ),
                                              !!(rxflags3 & BCM43xx_RXHDR_FLAGS3_2050RSSIADJ));
-       stats.signal = rxhdr->signal_quality;   //FIXME
 //TODO stats.noise = 
        if (is_ofdm)
                stats.rate = bcm43xx_plcp_get_bitrate_ofdm(plcp);
        else
                stats.rate = bcm43xx_plcp_get_bitrate_cck(plcp);
-//printk("RX ofdm %d, rate == %u\n", is_ofdm, stats.rate);
        stats.received_channel = radio->channel;
 //TODO stats.control = 
        stats.mask = IEEE80211_STATMASK_SIGNAL |
index 52e6df5..686d895 100644
@@ -847,6 +847,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
        PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
        PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
        PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
+       PCMCIA_DEVICE_MANF_CARD(0x0126, 0x0002),
        PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
                                         0x74c5e40d),
        PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
index e955db4..d2db8eb 100644
@@ -6254,13 +6254,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
         * member to call a function that then just turns and calls ipw2100_up.
         * net_dev->init is called after name allocation but before the
         * notifier chain is called */
-       mutex_lock(&priv->action_mutex);
        err = register_netdev(dev);
        if (err) {
                printk(KERN_WARNING DRV_NAME
                       "Error calling register_netdev.\n");
-               goto fail_unlock;
+               goto fail;
        }
+
+       mutex_lock(&priv->action_mutex);
        registered = 1;
 
        IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
@@ -6531,7 +6532,7 @@ static int __init ipw2100_init(void)
        printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
        printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
 
-       ret = pci_module_init(&ipw2100_pci_driver);
+       ret = pci_register_driver(&ipw2100_pci_driver);
 
 #ifdef CONFIG_IPW2100_DEBUG
        ipw2100_debug_level = debug;
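
This hunk, like the ipw2200 and orinoco hunks further down, drops the deprecated pci_module_init() wrapper in favour of calling pci_register_driver() directly. A minimal sketch of the registration pair (driver name, ID table and callbacks are placeholders):

    static struct pci_driver example_pci_driver = {
            .name     = "example",            /* placeholder */
            .id_table = example_pci_tbl,      /* placeholder PCI ID table */
            .probe    = example_probe,        /* placeholder probe() */
            .remove   = example_remove,       /* placeholder remove() */
    };

    static int __init example_init(void)
    {
            return pci_register_driver(&example_pci_driver);   /* 0 on success or a negative errno */
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_pci_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
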
index b3300ff..f29ec0e 100644
@@ -70,7 +70,7 @@
 #define VQ
 #endif
 
-#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
 #define DRV_DESCRIPTION        "Intel(R) PRO/Wireless 2200/2915 Network Driver"
 #define DRV_COPYRIGHT  "Copyright(c) 2003-2006 Intel Corporation"
 #define DRV_VERSION     IPW2200_VERSION
@@ -83,9 +83,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
 
 static int cmdlog = 0;
-#ifdef CONFIG_IPW2200_DEBUG
 static int debug = 0;
-#endif
 static int channel = 0;
 static int mode = 0;
 
@@ -567,7 +565,6 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv)
        spin_unlock_irqrestore(&priv->irq_lock, flags);
 }
 
-#ifdef CONFIG_IPW2200_DEBUG
 static char *ipw_error_desc(u32 val)
 {
        switch (val) {
@@ -634,7 +631,6 @@ static void ipw_dump_error_log(struct ipw_priv *priv,
                          error->log[i].time,
                          error->log[i].data, error->log[i].event);
 }
-#endif
 
 static inline int ipw_is_init(struct ipw_priv *priv)
 {
@@ -1435,9 +1431,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t count)
 {
        struct ipw_priv *priv = dev_get_drvdata(d);
-#ifdef CONFIG_IPW2200_DEBUG
        struct net_device *dev = priv->net_dev;
-#endif
        char buffer[] = "00000000";
        unsigned long len =
            (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
@@ -1958,14 +1952,12 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
                IPW_WARNING("Firmware error detected.  Restarting.\n");
                if (priv->error) {
                        IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
-#ifdef CONFIG_IPW2200_DEBUG
                        if (ipw_debug_level & IPW_DL_FW_ERRORS) {
                                struct ipw_fw_error *error =
                                    ipw_alloc_error_log(priv);
                                ipw_dump_error_log(priv, error);
                                kfree(error);
                        }
-#endif
                } else {
                        priv->error = ipw_alloc_error_log(priv);
                        if (priv->error)
@@ -1973,10 +1965,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
                        else
                                IPW_DEBUG_FW("Error allocating sysfs 'error' "
                                             "log.\n");
-#ifdef CONFIG_IPW2200_DEBUG
                        if (ipw_debug_level & IPW_DL_FW_ERRORS)
                                ipw_dump_error_log(priv, priv->error);
-#endif
                }
 
                /* XXX: If hardware encryption is for WPA/WPA2,
@@ -2287,7 +2277,7 @@ static int ipw_send_scan_abort(struct ipw_priv *priv)
 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
 {
        struct ipw_sensitivity_calib calib = {
-               .beacon_rssi_raw = sens,
+               .beacon_rssi_raw = cpu_to_le16(sens),
        };
 
        return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
@@ -2353,6 +2343,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
                return -1;
        }
 
+       phy_off = cpu_to_le32(phy_off);
        return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
                                &phy_off);
 }
@@ -2414,7 +2405,7 @@ static int ipw_set_tx_power(struct ipw_priv *priv)
 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
 {
        struct ipw_rts_threshold rts_threshold = {
-               .rts_threshold = rts,
+               .rts_threshold = cpu_to_le16(rts),
        };
 
        if (!priv) {
@@ -2429,7 +2420,7 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
 {
        struct ipw_frag_threshold frag_threshold = {
-               .frag_threshold = frag,
+               .frag_threshold = cpu_to_le16(frag),
        };
 
        if (!priv) {
@@ -2464,6 +2455,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
                break;
        }
 
+       param = cpu_to_le32(mode);
        return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
                                &param);
 }
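
Several of the surrounding hunks wrap 16- and 32-bit fields of host-to-firmware command structures in cpu_to_le16()/cpu_to_le32() before they are handed to ipw_send_cmd_pdu(), so the values come out little-endian on big-endian hosts as well. A schematic of that pattern (the struct and its fields are placeholders, not the driver's real command layout):

    struct example_cmd {
            __le16 threshold;                 /* placeholder fields */
            __le32 flags;
    } __attribute__ ((packed));

    static void example_fill_cmd(struct example_cmd *cmd, u16 thr, u32 flags)
    {
            /* Convert exactly once, where the value enters the wire format:
             * a no-op on little-endian hosts, a byte swap on big-endian ones. */
            cmd->threshold = cpu_to_le16(thr);
            cmd->flags     = cpu_to_le32(flags);
    }
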
@@ -2667,7 +2659,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
 
        IPW_DEBUG_FW(">> :\n");
 
-       //set the Stop and Abort bit
+       /* set the Stop and Abort bit */
        control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
        ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
        priv->sram_desc.last_cb_index = 0;
@@ -3002,8 +2994,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
        if (rc < 0)
                return rc;
 
-//      spin_lock_irqsave(&priv->lock, flags);
-
        for (addr = IPW_SHARED_LOWER_BOUND;
             addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
                ipw_write32(priv, addr, 0);
@@ -3097,8 +3087,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
           firmware have problem getting alive resp. */
        ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
 
-//      spin_unlock_irqrestore(&priv->lock, flags);
-
        return rc;
 }
 
@@ -3919,7 +3907,6 @@ static const struct ipw_status_code ipw_status_codes[] = {
        {0x2E, "Cipher suite is rejected per security policy"},
 };
 
-#ifdef CONFIG_IPW2200_DEBUG
 static const char *ipw_get_status_code(u16 status)
 {
        int i;
@@ -3928,7 +3915,6 @@ static const char *ipw_get_status_code(u16 status)
                        return ipw_status_codes[i].reason;
        return "Unknown status value.";
 }
-#endif
 
 static void inline average_init(struct average *avg)
 {
@@ -4398,7 +4384,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
                                        if (priv->
                                            status & (STATUS_ASSOCIATED |
                                                      STATUS_AUTH)) {
-#ifdef CONFIG_IPW2200_DEBUG
                                                struct notif_authenticate *auth
                                                    = &notif->u.auth;
                                                IPW_DEBUG(IPW_DL_NOTIF |
@@ -4416,7 +4401,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
                                                          ipw_get_status_code
                                                          (ntohs
                                                           (auth->status)));
-#endif
 
                                                priv->status &=
                                                    ~(STATUS_ASSOCIATING |
@@ -5059,7 +5043,6 @@ static void ipw_rx_queue_replenish(void *data)
                }
                list_del(element);
 
-               rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
                rxb->dma_addr =
                    pci_map_single(priv->pci_dev, rxb->skb->data,
                                   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -5838,8 +5821,8 @@ static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
        key.station_index = 0;  /* always 0 for BSS */
        key.flags = 0;
        /* 0 for new key; previous value of counter (after fatal error) */
-       key.tx_counter[0] = 0;
-       key.tx_counter[1] = 0;
+       key.tx_counter[0] = cpu_to_le32(0);
+       key.tx_counter[1] = cpu_to_le32(0);
 
        ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
 }
@@ -5973,7 +5956,6 @@ static void ipw_bg_adhoc_check(void *data)
        mutex_unlock(&priv->mutex);
 }
 
-#ifdef CONFIG_IPW2200_DEBUG
 static void ipw_debug_config(struct ipw_priv *priv)
 {
        IPW_DEBUG_INFO("Scan completed, no valid APs matched "
@@ -5998,9 +5980,6 @@ static void ipw_debug_config(struct ipw_priv *priv)
                IPW_DEBUG_INFO("PRIVACY off\n");
        IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
 }
-#else
-#define ipw_debug_config(x) do {} while (0)
-#endif
 
 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
 {
@@ -6188,7 +6167,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
        }
 }
 
-static int ipw_request_scan(struct ipw_priv *priv)
+static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
 {
        struct ipw_scan_request_ext scan;
        int err = 0, scan_type;
@@ -6219,19 +6198,29 @@ static int ipw_request_scan(struct ipw_priv *priv)
        }
 
        memset(&scan, 0, sizeof(scan));
+       scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
 
-       if (priv->config & CFG_SPEED_SCAN)
+       if (type == IW_SCAN_TYPE_PASSIVE) {
+               IPW_DEBUG_WX("use passive scanning\n");
+               scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
+               scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
+                       cpu_to_le16(120);
+               ipw_add_scan_channels(priv, &scan, scan_type);
+               goto send_request;
+       }
+
+       /* Use active scan by default. */
+       if (priv->config & CFG_SPEED_SCAN)
                scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
-                   cpu_to_le16(30);
+                       cpu_to_le16(30);
        else
                scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
-                   cpu_to_le16(20);
+                       cpu_to_le16(20);
 
        scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
-           cpu_to_le16(20);
-       scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
+               cpu_to_le16(20);
 
-       scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
+       scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
 
 #ifdef CONFIG_IPW2200_MONITOR
        if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
@@ -6268,7 +6257,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
                 *
                 * TODO: Move SPEED SCAN support to all modes and bands */
                scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
-                   cpu_to_le16(2000);
+                       cpu_to_le16(2000);
        } else {
 #endif                         /* CONFIG_IPW2200_MONITOR */
                /* If we are roaming, then make this a directed scan for the
@@ -6294,6 +6283,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
        }
 #endif
 
+send_request:
        err = ipw_send_scan_request_ext(priv, &scan);
        if (err) {
                IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
@@ -6304,11 +6294,19 @@ static int ipw_request_scan(struct ipw_priv *priv)
        priv->status &= ~STATUS_SCAN_PENDING;
        queue_delayed_work(priv->workqueue, &priv->scan_check,
                           IPW_SCAN_CHECK_WATCHDOG);
-      done:
+done:
        mutex_unlock(&priv->mutex);
        return err;
 }
 
+static int ipw_request_passive_scan(struct ipw_priv *priv) {
+       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+}
+
+static int ipw_request_scan(struct ipw_priv *priv) {
+       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+}
+
 static void ipw_bg_abort_scan(void *data)
 {
        struct ipw_priv *priv = data;
@@ -6387,13 +6385,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
            (wrqu->data.length && extra == NULL))
                return -EINVAL;
 
-       //mutex_lock(&priv->mutex);
-
-       //if (!ieee->wpa_enabled) {
-       //      err = -EOPNOTSUPP;
-       //      goto out;
-       //}
-
        if (wrqu->data.length) {
                buf = kmalloc(wrqu->data.length, GFP_KERNEL);
                if (buf == NULL) {
@@ -6413,7 +6404,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
 
        ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
       out:
-       //mutex_unlock(&priv->mutex);
        return err;
 }
 
@@ -6426,13 +6416,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
        struct ieee80211_device *ieee = priv->ieee;
        int err = 0;
 
-       //mutex_lock(&priv->mutex);
-
-       //if (!ieee->wpa_enabled) {
-       //      err = -EOPNOTSUPP;
-       //      goto out;
-       //}
-
        if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
                wrqu->data.length = 0;
                goto out;
@@ -6447,7 +6430,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
        memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
 
       out:
-       //mutex_unlock(&priv->mutex);
        return err;
 }
 
@@ -6558,7 +6540,6 @@ static int ipw_wx_set_auth(struct net_device *dev,
                ieee->ieee802_1x = param->value;
                break;
 
-               //case IW_AUTH_ROAMING_CONTROL:
        case IW_AUTH_PRIVACY_INVOKED:
                ieee->privacy_invoked = param->value;
                break;
@@ -6680,7 +6661,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
 
        switch (mlme->cmd) {
        case IW_MLME_DEAUTH:
-               // silently ignore
+               /* silently ignore */
                break;
 
        case IW_MLME_DISASSOC:
@@ -6811,7 +6792,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
                burst_duration = ipw_qos_get_burst_duration(priv);
                for (i = 0; i < QOS_QUEUE_NUM; i++)
                        qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
-                           (u16) burst_duration;
+                           (u16)burst_duration;
        } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
                if (type == IEEE_B) {
                        IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
@@ -6843,11 +6824,20 @@ static int ipw_qos_activate(struct ipw_priv *priv,
                        burst_duration = ipw_qos_get_burst_duration(priv);
                        for (i = 0; i < QOS_QUEUE_NUM; i++)
                                qos_parameters[QOS_PARAM_SET_ACTIVE].
-                                   tx_op_limit[i] = (u16) burst_duration;
+                                   tx_op_limit[i] = (u16)burst_duration;
                }
        }
 
        IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
+       for (i = 0; i < 3; i++) {
+               int j;
+               for (j = 0; j < QOS_QUEUE_NUM; j++) {
+                       qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
+                       qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
+                       qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
+               }
+       }
+
        err = ipw_send_qos_params_command(priv,
                                          (struct ieee80211_qos_parameters *)
                                          &(qos_parameters[0]));
@@ -7086,7 +7076,7 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
 
        if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
                tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
-               tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
+               tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
        }
        return 0;
 }
@@ -7667,7 +7657,6 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
        /* Big bitfield of all the fields we provide in radiotap */
        ipw_rt->rt_hdr.it_present =
            ((1 << IEEE80211_RADIOTAP_FLAGS) |
-            (1 << IEEE80211_RADIOTAP_TSFT) |
             (1 << IEEE80211_RADIOTAP_RATE) |
             (1 << IEEE80211_RADIOTAP_CHANNEL) |
             (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7676,6 +7665,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
 
        /* Zero the flags, we'll add to them as we go */
        ipw_rt->rt_flags = 0;
+       ipw_rt->rt_tsf = 0ULL;
 
        /* Convert signal to DBM */
        ipw_rt->rt_dbmsignal = antsignal;
@@ -7794,7 +7784,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        s8 noise = frame->noise;
        u8 rate = frame->rate;
        short len = le16_to_cpu(pkt->u.frame.length);
-       u64 tsf = 0;
        struct sk_buff *skb;
        int hdr_only = 0;
        u16 filter = priv->prom_priv->filter;
@@ -7829,17 +7818,17 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        }
 
        hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
-       if (ieee80211_is_management(hdr->frame_ctl)) {
+       if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_MGMT)
                        return;
                if (filter & IPW_PROM_MGMT_HEADER_ONLY)
                        hdr_only = 1;
-       } else if (ieee80211_is_control(hdr->frame_ctl)) {
+       } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_CTL)
                        return;
                if (filter & IPW_PROM_CTL_HEADER_ONLY)
                        hdr_only = 1;
-       } else if (ieee80211_is_data(hdr->frame_ctl)) {
+       } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_DATA)
                        return;
                if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -7857,7 +7846,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        ipw_rt = (void *)skb->data;
 
        if (hdr_only)
-               len = ieee80211_get_hdrlen(hdr->frame_ctl);
+               len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 
        memcpy(ipw_rt->payload, hdr, len);
 
@@ -7880,7 +7869,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        /* Big bitfield of all the fields we provide in radiotap */
        ipw_rt->rt_hdr.it_present =
            ((1 << IEEE80211_RADIOTAP_FLAGS) |
-            (1 << IEEE80211_RADIOTAP_TSFT) |
             (1 << IEEE80211_RADIOTAP_RATE) |
             (1 << IEEE80211_RADIOTAP_CHANNEL) |
             (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7889,8 +7877,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
 
        /* Zero the flags, we'll add to them as we go */
        ipw_rt->rt_flags = 0;
-
-       ipw_rt->rt_tsf = tsf;
+       ipw_rt->rt_tsf = 0ULL;
 
        /* Convert to DBM */
        ipw_rt->rt_dbmsignal = signal;
@@ -8163,8 +8150,7 @@ static void ipw_rx(struct ipw_priv *priv)
                switch (pkt->header.message_type) {
                case RX_FRAME_TYPE:     /* 802.11 frame */  {
                                struct ieee80211_rx_stats stats = {
-                                       .rssi =
-                                           le16_to_cpu(pkt->u.frame.rssi_dbm) -
+                                       .rssi = pkt->u.frame.rssi_dbm -
                                            IPW_RSSI_TO_DBM,
                                        .signal =
                                            le16_to_cpu(pkt->u.frame.rssi_dbm) -
@@ -8599,9 +8585,26 @@ static int ipw_wx_get_freq(struct net_device *dev,
         * configured CHANNEL then return that; otherwise return ANY */
        mutex_lock(&priv->mutex);
        if (priv->config & CFG_STATIC_CHANNEL ||
-           priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
-               wrqu->freq.m = priv->channel;
-       else
+           priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
+               int i;
+
+               i = ieee80211_channel_to_index(priv->ieee, priv->channel);
+               BUG_ON(i == -1);
+               wrqu->freq.e = 1;
+
+               switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
+               case IEEE80211_52GHZ_BAND:
+                       wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
+                       break;
+
+               case IEEE80211_24GHZ_BAND:
+                       wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
+                       break;
+
+               default:
+                       BUG();
+               }
+       } else
                wrqu->freq.m = 0;
 
        mutex_unlock(&priv->mutex);
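
For reference, the Wireless Extensions iw_freq pair set above encodes a frequency as m * 10^e Hz; the geo tables store MHz, so m = MHz * 100000 together with e = 1 yields the frequency in Hz. A small plain-C check of that arithmetic (the channel value is made up):

    #include <stdio.h>

    int main(void)
    {
            int mhz = 2437;                    /* e.g. 2.4 GHz channel 6 (made up here) */
            long m = (long)mhz * 100000;       /* mantissa, as in the hunk above */
            int e = 1;                         /* exponent */
            double hz = (double)m * 10.0;      /* m * 10^e with e == 1 */

            printf("m=%ld e=%d -> %.0f Hz (%.0f MHz)\n", m, e, hz, hz / 1e6);
            return 0;
    }
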
@@ -8857,42 +8860,38 @@ static int ipw_wx_set_essid(struct net_device *dev,
                            union iwreq_data *wrqu, char *extra)
 {
        struct ipw_priv *priv = ieee80211_priv(dev);
-       char *essid = "";       /* ANY */
-       int length = 0;
-       mutex_lock(&priv->mutex);
-       if (wrqu->essid.flags && wrqu->essid.length) {
-               length = wrqu->essid.length - 1;
-               essid = extra;
-       }
-       if (length == 0) {
-               IPW_DEBUG_WX("Setting ESSID to ANY\n");
-               if ((priv->config & CFG_STATIC_ESSID) &&
-                   !(priv->status & (STATUS_ASSOCIATED |
-                                     STATUS_ASSOCIATING))) {
-                       IPW_DEBUG_ASSOC("Attempting to associate with new "
-                                       "parameters.\n");
-                       priv->config &= ~CFG_STATIC_ESSID;
-                       ipw_associate(priv);
-               }
-               mutex_unlock(&priv->mutex);
-               return 0;
-       }
+        int length;
 
-       length = min(length, IW_ESSID_MAX_SIZE);
+        mutex_lock(&priv->mutex);
+
+        if (!wrqu->essid.flags)
+        {
+                IPW_DEBUG_WX("Setting ESSID to ANY\n");
+                ipw_disassociate(priv);
+                priv->config &= ~CFG_STATIC_ESSID;
+                ipw_associate(priv);
+                mutex_unlock(&priv->mutex);
+                return 0;
+        }
+
+       length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
+       if (!extra[length - 1])
+               length--;
 
        priv->config |= CFG_STATIC_ESSID;
 
-       if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
+       if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
+           && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
                IPW_DEBUG_WX("ESSID set to current ESSID.\n");
                mutex_unlock(&priv->mutex);
                return 0;
        }
 
-       IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
+       IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
                     length);
 
        priv->essid_len = length;
-       memcpy(priv->essid, essid, priv->essid_len);
+       memcpy(priv->essid, extra, priv->essid_len);
 
        /* Network configuration changed -- force [re]association */
        IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
@@ -9273,7 +9272,7 @@ static int ipw_wx_set_retry(struct net_device *dev,
        if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
                return 0;
 
-       if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
+       if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
                return -EINVAL;
 
        mutex_lock(&priv->mutex);
@@ -9396,15 +9395,19 @@ static int ipw_wx_set_scan(struct net_device *dev,
                           union iwreq_data *wrqu, char *extra)
 {
        struct ipw_priv *priv = ieee80211_priv(dev);
-       struct iw_scan_req *req = NULL;
-       if (wrqu->data.length
-           && wrqu->data.length == sizeof(struct iw_scan_req)) {
-               req = (struct iw_scan_req *)extra;
+       struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+       if (wrqu->data.length == sizeof(struct iw_scan_req)) {
                if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
                        ipw_request_direct_scan(priv, req->essid,
                                                req->essid_len);
                        return 0;
                }
+               if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
+                       queue_work(priv->workqueue,
+                                  &priv->request_passive_scan);
+                       return 0;
+               }
        }
 
        IPW_DEBUG_WX("Start scan\n");
@@ -9766,7 +9769,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
        return 0;
 }
 
-#endif                         // CONFIG_IPW2200_MONITOR
+#endif                         /* CONFIG_IPW2200_MONITOR */
 
 static int ipw_wx_reset(struct net_device *dev,
                        struct iw_request_info *info,
@@ -10009,7 +10012,7 @@ static  void init_sys_config(struct ipw_sys_config *sys_config)
        sys_config->dot11g_auto_detection = 0;
        sys_config->enable_cts_to_self = 0;
        sys_config->bt_coexist_collision_thr = 0;
-       sys_config->pass_noise_stats_to_host = 1;       //1 -- fix for 256
+       sys_config->pass_noise_stats_to_host = 1;       /* 1 -- fix for 256 */
        sys_config->silence_threshold = 0x1e;
 }
 
@@ -10113,7 +10116,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
                switch (priv->ieee->sec.level) {
                case SEC_LEVEL_3:
                        tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
-                           IEEE80211_FCTL_PROTECTED;
+                           cpu_to_le16(IEEE80211_FCTL_PROTECTED);
                        /* XXX: ACK flag must be set for CCMP even if it
                         * is a multicast/broadcast packet, because CCMP
                         * group communication encrypted by GTK is
@@ -10128,14 +10131,14 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
                        break;
                case SEC_LEVEL_2:
                        tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
-                           IEEE80211_FCTL_PROTECTED;
+                           cpu_to_le16(IEEE80211_FCTL_PROTECTED);
                        tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
                        tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
                        tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
                        break;
                case SEC_LEVEL_1:
                        tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
-                           IEEE80211_FCTL_PROTECTED;
+                           cpu_to_le16(IEEE80211_FCTL_PROTECTED);
                        tfd->u.data.key_index = priv->ieee->tx_keyidx;
                        if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
                            40)
@@ -10267,17 +10270,17 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
 
        /* Filtering of fragment chains is done agains the first fragment */
        hdr = (void *)txb->fragments[0]->data;
-       if (ieee80211_is_management(hdr->frame_ctl)) {
+       if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_MGMT)
                        return;
                if (filter & IPW_PROM_MGMT_HEADER_ONLY)
                        hdr_only = 1;
-       } else if (ieee80211_is_control(hdr->frame_ctl)) {
+       } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_CTL)
                        return;
                if (filter & IPW_PROM_CTL_HEADER_ONLY)
                        hdr_only = 1;
-       } else if (ieee80211_is_data(hdr->frame_ctl)) {
+       } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
                if (filter & IPW_PROM_NO_DATA)
                        return;
                if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -10292,7 +10295,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
 
                if (hdr_only) {
                        hdr = (void *)src->data;
-                       len = ieee80211_get_hdrlen(hdr->frame_ctl);
+                       len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
                } else
                        len = src->len;
 
@@ -10636,6 +10639,8 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
        INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
        INIT_WORK(&priv->request_scan,
                  (void (*)(void *))ipw_request_scan, priv);
+       INIT_WORK(&priv->request_passive_scan,
+                 (void (*)(void *))ipw_request_passive_scan, priv);
        INIT_WORK(&priv->gather_stats,
                  (void (*)(void *))ipw_bg_gather_stats, priv);
        INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
@@ -11488,9 +11493,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        priv->net_dev = net_dev;
        priv->pci_dev = pdev;
-#ifdef CONFIG_IPW2200_DEBUG
        ipw_debug_level = debug;
-#endif
        spin_lock_init(&priv->irq_lock);
        spin_lock_init(&priv->lock);
        for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
@@ -11755,6 +11758,16 @@ static int ipw_pci_resume(struct pci_dev *pdev)
 }
 #endif
 
+static void ipw_pci_shutdown(struct pci_dev *pdev)
+{
+       struct ipw_priv *priv = pci_get_drvdata(pdev);
+
+       /* Take down the device; powers it off, etc. */
+       ipw_down(priv);
+
+       pci_disable_device(pdev);
+}
+
 /* driver initialization stuff */
 static struct pci_driver ipw_driver = {
        .name = DRV_NAME,
@@ -11765,6 +11778,7 @@ static struct pci_driver ipw_driver = {
        .suspend = ipw_pci_suspend,
        .resume = ipw_pci_resume,
 #endif
+       .shutdown = ipw_pci_shutdown,
 };
 
 static int __init ipw_init(void)
@@ -11774,7 +11788,7 @@ static int __init ipw_init(void)
        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
 
-       ret = pci_module_init(&ipw_driver);
+       ret = pci_register_driver(&ipw_driver);
        if (ret) {
                IPW_ERROR("Unable to initialize PCI module\n");
                return ret;
@@ -11808,10 +11822,8 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
 module_param(led, int, 0444);
 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
 
-#ifdef CONFIG_IPW2200_DEBUG
 module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "debug output mask");
-#endif
 
 module_param(channel, int, 0444);
 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
index 8b1cd7c..dad5eed 100644
@@ -713,7 +713,6 @@ struct ipw_rx_packet {
 
 struct ipw_rx_mem_buffer {
        dma_addr_t dma_addr;
-       struct ipw_rx_buffer *rxb;
        struct sk_buff *skb;
        struct list_head list;
 };                             /* Not transferred over network, so not  __attribute__ ((packed)) */
@@ -1297,6 +1296,7 @@ struct ipw_priv {
        struct work_struct system_config;
        struct work_struct rx_replenish;
        struct work_struct request_scan;
+       struct work_struct request_passive_scan;
        struct work_struct adapter_restart;
        struct work_struct rf_kill;
        struct work_struct up;
@@ -1381,13 +1381,18 @@ BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
 BIT_ARG16(x)
 
 
-#ifdef CONFIG_IPW2200_DEBUG
 #define IPW_DEBUG(level, fmt, args...) \
+do { if (ipw_debug_level & (level)) \
+  printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
+         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+
+#ifdef CONFIG_IPW2200_DEBUG
+#define IPW_LL_DEBUG(level, fmt, args...) \
 do { if (ipw_debug_level & (level)) \
   printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
          in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
 #else
-#define IPW_DEBUG(level, fmt, args...) do {} while (0)
+#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
 #endif                         /* CONFIG_IPW2200_DEBUG */
 
 /*
@@ -1457,28 +1462,27 @@ do { if (ipw_debug_level & (level)) \
 
 #define IPW_DEBUG_WX(f, a...)     IPW_DEBUG(IPW_DL_WX, f, ## a)
 #define IPW_DEBUG_SCAN(f, a...)   IPW_DEBUG(IPW_DL_SCAN, f, ## a)
-#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a)
-#define IPW_DEBUG_TRACE(f, a...)  IPW_DEBUG(IPW_DL_TRACE, f, ## a)
-#define IPW_DEBUG_RX(f, a...)     IPW_DEBUG(IPW_DL_RX, f, ## a)
-#define IPW_DEBUG_TX(f, a...)     IPW_DEBUG(IPW_DL_TX, f, ## a)
-#define IPW_DEBUG_ISR(f, a...)    IPW_DEBUG(IPW_DL_ISR, f, ## a)
+#define IPW_DEBUG_TRACE(f, a...)  IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a)
+#define IPW_DEBUG_RX(f, a...)     IPW_LL_DEBUG(IPW_DL_RX, f, ## a)
+#define IPW_DEBUG_TX(f, a...)     IPW_LL_DEBUG(IPW_DL_TX, f, ## a)
+#define IPW_DEBUG_ISR(f, a...)    IPW_LL_DEBUG(IPW_DL_ISR, f, ## a)
 #define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
-#define IPW_DEBUG_LED(f, a...) IPW_DEBUG(IPW_DL_LED, f, ## a)
-#define IPW_DEBUG_WEP(f, a...)    IPW_DEBUG(IPW_DL_WEP, f, ## a)
-#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
-#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a)
-#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a)
+#define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a)
+#define IPW_DEBUG_WEP(f, a...)    IPW_LL_DEBUG(IPW_DL_WEP, f, ## a)
+#define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
+#define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a)
+#define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a)
 #define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
 #define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
-#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a)
-#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a)
-#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a)
+#define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a)
+#define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a)
+#define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a)
 #define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
 #define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
 #define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
-#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a)
-#define IPW_DEBUG_MERGE(f, a...) IPW_DEBUG(IPW_DL_MERGE, f, ## a)
-#define IPW_DEBUG_QOS(f, a...)   IPW_DEBUG(IPW_DL_QOS, f, ## a)
+#define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a)
+#define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a)
+#define IPW_DEBUG_QOS(f, a...)   IPW_LL_DEBUG(IPW_DL_QOS, f, ## a)
 
 #include <linux/ctype.h>
 
@@ -1947,10 +1951,17 @@ struct host_cmd {
        u32 *param;
 } __attribute__ ((packed));
 
+struct cmdlog_host_cmd {
+       u8 cmd;
+       u8 len;
+       u16 reserved;
+       char param[124];
+} __attribute__ ((packed));
+
 struct ipw_cmd_log {
        unsigned long jiffies;
        int retcode;
-       struct host_cmd cmd;
+       struct cmdlog_host_cmd cmd;
 };
 
 /* SysConfig command parameters ... */
index 317ace7..1174ff5 100644
@@ -82,6 +82,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/if_arp.h>
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
index 16db3e1..fb5700d 100644
@@ -134,11 +134,7 @@ extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *reg
 /* Locking and synchronization functions                            */
 /********************************************************************/
 
-/* These functions *must* be inline or they will break horribly on
- * SPARC, due to its weird semantics for save/restore flags. extern
- * inline should prevent the kernel from linking or module from
- * loading if they are not inlined. */
-extern inline int orinoco_lock(struct orinoco_private *priv,
+static inline int orinoco_lock(struct orinoco_private *priv,
                               unsigned long *flags)
 {
        spin_lock_irqsave(&priv->lock, *flags);
@@ -151,7 +147,7 @@ extern inline int orinoco_lock(struct orinoco_private *priv,
        return 0;
 }
 
-extern inline void orinoco_unlock(struct orinoco_private *priv,
+static inline void orinoco_unlock(struct orinoco_private *priv,
                                  unsigned long *flags)
 {
        spin_unlock_irqrestore(&priv->lock, *flags);
index bf05b90..eaf3d13 100644
@@ -304,7 +304,7 @@ MODULE_LICENSE("Dual MPL/GPL");
 static int __init orinoco_nortel_init(void)
 {
        printk(KERN_DEBUG "%s\n", version);
-       return pci_module_init(&orinoco_nortel_driver);
+       return pci_register_driver(&orinoco_nortel_driver);
 }
 
 static void __exit orinoco_nortel_exit(void)
index 1759c54..97a8b4f 100644
@@ -244,7 +244,7 @@ MODULE_LICENSE("Dual MPL/GPL");
 static int __init orinoco_pci_init(void)
 {
        printk(KERN_DEBUG "%s\n", version);
-       return pci_module_init(&orinoco_pci_driver);
+       return pci_register_driver(&orinoco_pci_driver);
 }
 
 static void __exit orinoco_pci_exit(void)
index 7f006f6..31162ac 100644
@@ -351,7 +351,7 @@ MODULE_LICENSE("Dual MPL/GPL");
 static int __init orinoco_plx_init(void)
 {
        printk(KERN_DEBUG "%s\n", version);
-       return pci_module_init(&orinoco_plx_driver);
+       return pci_register_driver(&orinoco_plx_driver);
 }
 
 static void __exit orinoco_plx_exit(void)
index 0831721..7c7b960 100644
@@ -228,7 +228,7 @@ MODULE_LICENSE("Dual MPL/GPL");
 static int __init orinoco_tmd_init(void)
 {
        printk(KERN_DEBUG "%s\n", version);
-       return pci_module_init(&orinoco_tmd_driver);
+       return pci_register_driver(&orinoco_tmd_driver);
 }
 
 static void __exit orinoco_tmd_exit(void)
index 989599a..c09fbf7 100644
 
 #include <net/iw_handler.h>    /* New driver API */
 
+#define KEY_SIZE_WEP104 13     /* 104/128-bit WEP keys */
+#define KEY_SIZE_WEP40  5      /* 40/64-bit WEP keys */
+/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */
+#define KEY_SIZE_TKIP   32     /* TKIP keys */
 
-static void prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
+static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
                                u8 *wpa_ie, size_t wpa_ie_len);
-static size_t prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
+static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
 static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
                                __u32 *, char *);
 
+/* In 500 kbps */
+static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
+                                               12, 18, 24, 36,
+                                               48, 72, 96, 108 };
 
 /**
  * prism54_mib_mode_helper - MIB change mode helper function
@@ -468,6 +476,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
        range->event_capa[1] = IW_EVENT_CAPA_K_1;
        range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
 
+       range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+               IW_ENC_CAPA_CIPHER_TKIP;
+
        if (islpci_get_state(priv) < PRV_STATE_INIT)
                return 0;
 
@@ -567,6 +578,8 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
        struct iw_event iwe;    /* Temporary buffer */
        short cap;
        islpci_private *priv = netdev_priv(ndev);
+       u8 wpa_ie[MAX_WPA_IE_LEN];
+       size_t wpa_ie_len;
 
        /* The first entry must be the MAC address */
        memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
@@ -627,28 +640,40 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
        current_ev =
            iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
 
-       if (priv->wpa) {
-               u8 wpa_ie[MAX_WPA_IE_LEN];
-               char *buf, *p;
-               size_t wpa_ie_len;
+       /* Add WPA/RSN Information Element, if any */
+       wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
+       if (wpa_ie_len > 0) {
+               iwe.cmd = IWEVGENIE;
+               iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
+               current_ev = iwe_stream_add_point(current_ev, end_buf,
+                               &iwe, wpa_ie);
+       }
+       /* Do the bitrates */
+       {
+               char *  current_val = current_ev + IW_EV_LCP_LEN;
                int i;
-
-               wpa_ie_len = prism54_wpa_ie_get(priv, bss->address, wpa_ie);
-               if (wpa_ie_len > 0 &&
-                   (buf = kmalloc(wpa_ie_len * 2 + 10, GFP_ATOMIC))) {
-                       p = buf;
-                       p += sprintf(p, "wpa_ie=");
-                       for (i = 0; i < wpa_ie_len; i++) {
-                               p += sprintf(p, "%02x", wpa_ie[i]);
+               int mask;
+
+               iwe.cmd = SIOCGIWRATE;
+               /* Those two flags are ignored... */
+               iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+               /* Parse the bitmask */
+               mask = 0x1;
+               for(i = 0; i < sizeof(scan_rate_list); i++) {
+                       if(bss->rates & mask) {
+                               iwe.u.bitrate.value = (scan_rate_list[i] * 500000);
+                               current_val = iwe_stream_add_value(current_ev, current_val,
+                                                                  end_buf, &iwe,
+                                                                  IW_EV_PARAM_LEN);
                        }
-                       memset(&iwe, 0, sizeof (iwe));
-                       iwe.cmd = IWEVCUSTOM;
-                       iwe.u.data.length = strlen(buf);
-                       current_ev = iwe_stream_add_point(current_ev, end_buf,
-                                                         &iwe, buf);
-                       kfree(buf);
+                       mask <<= 1;
                }
+               /* Check if we added any event */
+               if ((current_val - current_ev) > IW_EV_LCP_LEN)
+                       current_ev = current_val;
        }
+
        return current_ev;
 }
 
@@ -1051,12 +1076,24 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
                current_index = r.u;
                /* Verify that the key is not marked as invalid */
                if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
-                       key.length = dwrq->length > sizeof (key.key) ?
-                           sizeof (key.key) : dwrq->length;
-                       memcpy(key.key, extra, key.length);
-                       if (key.length == 32)
-                               /* we want WPA-PSK */
+                       if (dwrq->length > KEY_SIZE_TKIP) {
+                               /* User-provided key data too big */
+                               return -EINVAL;
+                       }
+                       if (dwrq->length > KEY_SIZE_WEP104) {
+                               /* WPA-PSK TKIP */
                                key.type = DOT11_PRIV_TKIP;
+                               key.length = KEY_SIZE_TKIP;
+                       } else if (dwrq->length > KEY_SIZE_WEP40) {
+                               /* WEP 104/128 */
+                               key.length = KEY_SIZE_WEP104;
+                       } else {
+                               /* WEP 40/64 */
+                               key.length = KEY_SIZE_WEP40;
+                       }
+                       memset(key.key, 0, sizeof (key.key));
+                       memcpy(key.key, extra, dwrq->length);
+
                        if ((index < 0) || (index > 3))
                                /* no index provided use the current one */
                                index = current_index;
@@ -1210,6 +1247,489 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
        }
 }
 
+static int prism54_set_genie(struct net_device *ndev,
+                            struct iw_request_info *info,
+                            struct iw_point *data, char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       int alen, ret = 0;
+       struct obj_attachment *attach;
+
+       if (data->length > MAX_WPA_IE_LEN ||
+           (data->length && extra == NULL))
+               return -EINVAL;
+
+       memcpy(priv->wpa_ie, extra, data->length);
+       priv->wpa_ie_len = data->length;
+
+       alen = sizeof(*attach) + priv->wpa_ie_len;
+       attach = kzalloc(alen, GFP_KERNEL);
+       if (attach == NULL)
+               return -ENOMEM;
+
+#define WLAN_FC_TYPE_MGMT 0
+#define WLAN_FC_STYPE_ASSOC_REQ 0
+#define WLAN_FC_STYPE_REASSOC_REQ 2
+
+       /* Note: endianness is covered by mgt_set_varlen */
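+       /* The type field appears to follow the 802.11 frame control layout
+        * (frame type in bits 2-3, subtype in bits 4-7), selecting which
+        * management frames the IE is attached to. */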
+       attach->type = (WLAN_FC_TYPE_MGMT << 2) |
+               (WLAN_FC_STYPE_ASSOC_REQ << 4);
+       attach->id = -1;
+       attach->size = priv->wpa_ie_len;
+       memcpy(attach->data, extra, priv->wpa_ie_len);
+
+       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
+               priv->wpa_ie_len);
+       if (ret == 0) {
+               attach->type = (WLAN_FC_TYPE_MGMT << 2) |
+                       (WLAN_FC_STYPE_REASSOC_REQ << 4);
+
+               ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
+                       priv->wpa_ie_len);
+               if (ret == 0)
+                       printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
+                               ndev->name);
+       }
+
+       kfree(attach);
+       return ret;
+}
+
+
+static int prism54_get_genie(struct net_device *ndev,
+                            struct iw_request_info *info,
+                            struct iw_point *data, char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       int len = priv->wpa_ie_len;
+
+       if (len <= 0) {
+               data->length = 0;
+               return 0;
+       }
+
+       if (data->length < len)
+               return -E2BIG;
+
+       data->length = len;
+       memcpy(extra, priv->wpa_ie, len);
+
+       return 0;
+}
+
+static int prism54_set_auth(struct net_device *ndev,
+                              struct iw_request_info *info,
+                              union iwreq_data *wrqu, char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       struct iw_param *param = &wrqu->param;
+       u32 mlmelevel = 0, authen = 0, dot1x = 0;
+       u32 exunencrypt = 0, privinvoked = 0, wpa = 0;
+       u32 old_wpa;
+       int ret = 0;
+       union oid_res_t r;
+
+       if (islpci_get_state(priv) < PRV_STATE_INIT)
+               return 0;
+
+       /* first get the flags */
+       down_write(&priv->mib_sem);
+       wpa = old_wpa = priv->wpa;
+       up_write(&priv->mib_sem);
+       ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
+       authen = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
+       privinvoked = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
+       exunencrypt = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
+       dot1x = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r);
+       mlmelevel = r.u;
+
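+       /* Note: only the status of the last query above is checked here. */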
+       if (ret < 0)
+               goto out;
+
+       switch (param->flags & IW_AUTH_INDEX) {
+       case IW_AUTH_CIPHER_PAIRWISE:
+       case IW_AUTH_CIPHER_GROUP:
+       case IW_AUTH_KEY_MGMT:
+               break;
+
+       case IW_AUTH_WPA_ENABLED:
+               /* Do the same thing as IW_AUTH_WPA_VERSION */
+               if (param->value) {
+                       wpa = 1;
+                       privinvoked = 1; /* For privacy invoked */
+                       exunencrypt = 1; /* Filter out all unencrypted frames */
+                       dot1x = 0x01; /* To enable eap filter */
+                       mlmelevel = DOT11_MLME_EXTENDED;
+                       authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
+               } else {
+                       wpa = 0;
+                       privinvoked = 0;
+                       exunencrypt = 0; /* Do not filter un-encrypted data */
+                       dot1x = 0;
+                       mlmelevel = DOT11_MLME_AUTO;
+               }
+               break;
+
+       case IW_AUTH_WPA_VERSION:
+               if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
+                       wpa = 0;
+                       privinvoked = 0;
+                       exunencrypt = 0; /* Do not filter un-encrypted data */
+                       dot1x = 0;
+                       mlmelevel = DOT11_MLME_AUTO;
+               } else {
+                       if (param->value & IW_AUTH_WPA_VERSION_WPA)
+                               wpa = 1;
+                       else if (param->value & IW_AUTH_WPA_VERSION_WPA2)
+                               wpa = 2;
+                       privinvoked = 1; /* For privacy invoked */
+                       exunencrypt = 1; /* Filter out all unencrypted frames */
+                       dot1x = 0x01; /* To enable eap filter */
+                       mlmelevel = DOT11_MLME_EXTENDED;
+                       authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
+               }
+               break;
+
+       case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+               dot1x = param->value ? 1 : 0;
+               break;
+
+       case IW_AUTH_PRIVACY_INVOKED:
+               privinvoked = param->value ? 1 : 0;
+               break;
+
+       case IW_AUTH_DROP_UNENCRYPTED:
+               exunencrypt = param->value ? 1 : 0;
+               break;
+
+       case IW_AUTH_80211_AUTH_ALG:
+               if (param->value & IW_AUTH_ALG_SHARED_KEY) {
+                       /* Only WEP uses _SK and _BOTH */
+                       if (wpa > 0) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       authen = DOT11_AUTH_SK;
+               } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
+                       authen = DOT11_AUTH_OS;
+               } else {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* Set all the values */
+       down_write(&priv->mib_sem);
+       priv->wpa = wpa;
+       up_write(&priv->mib_sem);
+       mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
+       mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked);
+       mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt);
+       mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
+       mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel);
+
+out:
+       return ret;
+}
+
+static int prism54_get_auth(struct net_device *ndev,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       struct iw_param *param = &wrqu->param;
+       u32 wpa = 0;
+       int ret = 0;
+       union oid_res_t r;
+
+       if (islpci_get_state(priv) < PRV_STATE_INIT)
+               return 0;
+
+       /* first get the flags */
+       down_write(&priv->mib_sem);
+       wpa = priv->wpa;
+       up_write(&priv->mib_sem);
+
+       switch (param->flags & IW_AUTH_INDEX) {
+       case IW_AUTH_CIPHER_PAIRWISE:
+       case IW_AUTH_CIPHER_GROUP:
+       case IW_AUTH_KEY_MGMT:
+               /*
+                * wpa_supplicant will control these internally
+                */
+               ret = -EOPNOTSUPP;
+               break;
+
+       case IW_AUTH_WPA_VERSION:
+               switch (wpa) {
+               case 1:
+                       param->value = IW_AUTH_WPA_VERSION_WPA;
+                       break;
+               case 2:
+                       param->value = IW_AUTH_WPA_VERSION_WPA2;
+                       break;
+               case 0:
+               default:
+                       param->value = IW_AUTH_WPA_VERSION_DISABLED;
+                       break;
+               }
+               break;
+
+       case IW_AUTH_DROP_UNENCRYPTED:
+               ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
+               if (ret >= 0)
+                       param->value = r.u > 0 ? 1 : 0;
+               break;
+
+       case IW_AUTH_80211_AUTH_ALG:
+               ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
+               if (ret >= 0) {
+                       switch (r.u) {
+                       case DOT11_AUTH_OS:
+                               param->value = IW_AUTH_ALG_OPEN_SYSTEM;
+                               break;
+                       case DOT11_AUTH_BOTH:
+                       case DOT11_AUTH_SK:
+                               param->value = IW_AUTH_ALG_SHARED_KEY;
+                               break;
+                       case DOT11_AUTH_NONE:
+                       default:
+                               param->value = 0;
+                               break;
+                       }
+               }
+               break;
+
+       case IW_AUTH_WPA_ENABLED:
+               param->value = wpa > 0 ? 1 : 0;
+               break;
+
+       case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+               ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
+               if (ret >= 0)
+                       param->value = r.u > 0 ? 1 : 0;
+               break;
+
+       case IW_AUTH_PRIVACY_INVOKED:
+               ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
+               if (ret >= 0)
+                       param->value = r.u > 0 ? 1 : 0;
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+       return ret;
+}
+
+static int prism54_set_encodeext(struct net_device *ndev,
+                                struct iw_request_info *info,
+                                union iwreq_data *wrqu,
+                                char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       int idx, alg = ext->alg, set_key = 1;
+       union oid_res_t r;
+       int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
+       int ret = 0;
+
+       if (islpci_get_state(priv) < PRV_STATE_INIT)
+               return 0;
+
+       /* Determine and validate the key index */
+       idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
+       if (idx) {
+               if (idx < 0 || idx > 3)
+                       return -EINVAL;
+       } else {
+               ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
+               if (ret < 0)
+                       goto out;
+               idx = r.u;
+       }
+
+       if (encoding->flags & IW_ENCODE_DISABLED)
+               alg = IW_ENCODE_ALG_NONE;
+
+       if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+               /* Only set transmit key index here, actual
+                * key is set below if needed.
+                */
+               ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx);
+               set_key = ext->key_len > 0 ? 1 : 0;
+       }
+
+       if (set_key) {
+               struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
+               switch (alg) {
+               case IW_ENCODE_ALG_NONE:
+                       break;
+               case IW_ENCODE_ALG_WEP:
+                       if (ext->key_len > KEY_SIZE_WEP104) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       if (ext->key_len > KEY_SIZE_WEP40)
+                               key.length = KEY_SIZE_WEP104;
+                       else
+                               key.length = KEY_SIZE_WEP40;
+                       break;
+               case IW_ENCODE_ALG_TKIP:
+                       if (ext->key_len > KEY_SIZE_TKIP) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       key.type = DOT11_PRIV_TKIP;
+                       key.length = KEY_SIZE_TKIP;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               if (key.length) {
+                       memset(key.key, 0, sizeof(key.key));
+                       memcpy(key.key, ext->key, ext->key_len);
+                       ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx,
+                                           &key);
+                       if (ret < 0)
+                               goto out;
+               }
+       }
+
+       /* Read the flags */
+       if (encoding->flags & IW_ENCODE_DISABLED) {
+               /* Encoding disabled,
+                * authen = DOT11_AUTH_OS;
+                * invoke = 0;
+                * exunencrypt = 0; */
+       }
+       if (encoding->flags & IW_ENCODE_OPEN) {
+               /* Encode but accept non-encoded packets. No auth */
+               invoke = 1;
+       }
+       if (encoding->flags & IW_ENCODE_RESTRICTED) {
+               /* Refuse non-encoded packets. Auth */
+               authen = DOT11_AUTH_BOTH;
+               invoke = 1;
+               exunencrypt = 1;
+       }
+
+       /* do the change if requested  */
+       if (encoding->flags & IW_ENCODE_MODE) {
+               ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0,
+                                     &authen);
+               ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0,
+                                     &invoke);
+               ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
+                                     &exunencrypt);
+       }
+
+out:
+       return ret;
+}
+
+
+static int prism54_get_encodeext(struct net_device *ndev,
+                                struct iw_request_info *info,
+                                union iwreq_data *wrqu,
+                                char *extra)
+{
+       islpci_private *priv = netdev_priv(ndev);
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       int idx, max_key_len;
+       union oid_res_t r;
+       int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0;
+       int ret = 0;
+
+       if (islpci_get_state(priv) < PRV_STATE_INIT)
+               return 0;
+
+       /* first get the flags */
+       ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
+       authen = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
+       invoke = r.u;
+       ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
+       exunencrypt = r.u;
+       if (ret < 0)
+               goto out;
+
+       max_key_len = encoding->length - sizeof(*ext);
+       if (max_key_len < 0)
+               return -EINVAL;
+
+       idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
+       if (idx) {
+               if (idx < 0 || idx > 3)
+                       return -EINVAL;
+       } else {
+               ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
+               if (ret < 0)
+                       goto out;
+               idx = r.u;
+       }
+
+       encoding->flags = idx + 1;
+       memset(ext, 0, sizeof(*ext));
+
+       switch (authen) {
+       case DOT11_AUTH_BOTH:
+       case DOT11_AUTH_SK:
+               wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
+               break;
+       case DOT11_AUTH_OS:
+       default:
+               wrqu->encoding.flags |= IW_ENCODE_OPEN;
+               break;
+       }
+
+       down_write(&priv->mib_sem);
+       wpa = priv->wpa;
+       up_write(&priv->mib_sem);
+
+       if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) {
+               /* No encryption */
+               ext->alg = IW_ENCODE_ALG_NONE;
+               ext->key_len = 0;
+               wrqu->encoding.flags |= IW_ENCODE_DISABLED;
+       } else {
+               struct obj_key *key;
+
+               ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r);
+               if (ret < 0)
+                       goto out;
+               key = r.ptr;
+               if (max_key_len < key->length) {
+                       ret = -E2BIG;
+                       goto out;
+               }
+               memcpy(ext->key, key->key, key->length);
+               ext->key_len = key->length;
+
+               switch (key->type) {
+               case DOT11_PRIV_TKIP:
+                       ext->alg = IW_ENCODE_ALG_TKIP;
+                       break;
+               default:
+               case DOT11_PRIV_WEP:
+                       ext->alg = IW_ENCODE_ALG_WEP;
+                       break;
+               }
+               wrqu->encoding.flags |= IW_ENCODE_ENABLED;
+       }
+
+out:
+       return ret;
+}
+
+
 static int
 prism54_reset(struct net_device *ndev, struct iw_request_info *info,
              __u32 * uwrq, char *extra)
@@ -1591,8 +2111,8 @@ static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
 #define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
 
 static void
-prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
-                  u8 *wpa_ie, size_t wpa_ie_len)
+prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
+                      u8 *wpa_ie, size_t wpa_ie_len)
 {
        struct list_head *ptr;
        struct islpci_bss_wpa_ie *bss = NULL;
@@ -1658,7 +2178,7 @@ prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
 }
 
 static size_t
-prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
+prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
 {
        struct list_head *ptr;
        struct islpci_bss_wpa_ie *bss = NULL;
@@ -1683,14 +2203,14 @@ prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
 }
 
 void
-prism54_wpa_ie_init(islpci_private *priv)
+prism54_wpa_bss_ie_init(islpci_private *priv)
 {
        INIT_LIST_HEAD(&priv->bss_wpa_list);
        sema_init(&priv->wpa_sem, 1);
 }
 
 void
-prism54_wpa_ie_clean(islpci_private *priv)
+prism54_wpa_bss_ie_clean(islpci_private *priv)
 {
        struct list_head *ptr, *n;
 
@@ -1722,7 +2242,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
                }
                if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
                    memcmp(pos + 2, wpa_oid, 4) == 0) {
-                       prism54_wpa_ie_add(priv, addr, pos, pos[1] + 2);
+                       prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2);
                        return;
                }
                pos += 2 + pos[1];
@@ -1879,7 +2399,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
                send_formatted_event(priv, "Associate request (ex)", mlme, 1);
 
                if (priv->iw_mode != IW_MODE_MASTER 
-                               && mlmeex->state != DOT11_STATE_AUTHING)
+                               && mlmeex->state != DOT11_STATE_ASSOCING)
                        break;
                
                confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
@@ -1893,7 +2413,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
                confirm->state = 0; /* not used */
                confirm->code = 0;
 
-               wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie);
+               wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
 
                if (!wpa_ie_len) {
                        printk(KERN_DEBUG "No WPA IE found from "
@@ -1937,7 +2457,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
                confirm->state = 0; /* not used */
                confirm->code = 0;
 
-               wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie);
+               wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
 
                if (!wpa_ie_len) {
                        printk(KERN_DEBUG "No WPA IE found from "
@@ -2553,6 +3073,15 @@ static const iw_handler prism54_handler[] = {
        (iw_handler) prism54_get_encode,        /* SIOCGIWENCODE */
        (iw_handler) NULL,      /* SIOCSIWPOWER */
        (iw_handler) NULL,      /* SIOCGIWPOWER */
+       NULL,                   /* -- hole -- */
+       NULL,                   /* -- hole -- */
+       (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */
+       (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */
+       (iw_handler) prism54_set_auth,  /* SIOCSIWAUTH */
+       (iw_handler) prism54_get_auth,  /* SIOCGIWAUTH */
+       (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */
+       (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */
+       NULL,                   /* SIOCSIWPMKSA */
 };
 
 /* The low order bit identify a SET (0) or a GET (1) ioctl.  */
index 46d5cde..65f33ac 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <net/iw_handler.h>    /* New driver API */
 
-#define SUPPORTED_WIRELESS_EXT                  16
+#define SUPPORTED_WIRELESS_EXT                  19
 
 void prism54_mib_init(islpci_private *);
 
@@ -39,8 +39,8 @@ void prism54_acl_clean(struct islpci_acl *);
 
 void prism54_process_trap(void *);
 
-void prism54_wpa_ie_init(islpci_private *priv);
-void prism54_wpa_ie_clean(islpci_private *priv);
+void prism54_wpa_bss_ie_init(islpci_private *priv);
+void prism54_wpa_bss_ie_clean(islpci_private *priv);
 
 int prism54_set_mac_address(struct net_device *, void *);
 
index 5ddf295..ab3c5a2 100644 (file)
@@ -715,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv)
        }
 
        prism54_acl_init(&priv->acl);
-       prism54_wpa_ie_init(priv);
+       prism54_wpa_bss_ie_init(priv);
        if (mgt_init(priv)) 
                goto out_free;
 
@@ -774,7 +774,7 @@ islpci_free_memory(islpci_private *priv)
 
        /* Free the access control list and the WPA list */
        prism54_acl_clean(&priv->acl);
-       prism54_wpa_ie_clean(priv);
+       prism54_wpa_bss_ie_clean(priv);
        mgt_clean(priv);
 
        return 0;
index 0705316..5049f37 100644 (file)
@@ -179,6 +179,8 @@ typedef struct {
        struct list_head bss_wpa_list;
        int num_bss_wpa;
        struct semaphore wpa_sem;
+       u8 wpa_ie[MAX_WPA_IE_LEN];
+       size_t wpa_ie_len;
 
        struct work_struct reset_task;
        int reset_task_pending;
index 09fc17a..f692dcc 100644 (file)
@@ -313,7 +313,7 @@ prism54_module_init(void)
 
        __bug_on_wrong_struct_sizes ();
 
-       return pci_module_init(&prism54_driver);
+       return pci_register_driver(&prism54_driver);
 }
 
 /* by the time prism54_module_exit() terminates, as a postcondition
index 61b83a5..8e112d1 100644 (file)
@@ -52,8 +52,8 @@
 #include <pcmcia/ds.h>
 #include <pcmcia/mem_op.h>
 
-#include <net/ieee80211.h>
 #include <linux/wireless.h>
+#include <net/iw_handler.h>
 
 #include <asm/io.h>
 #include <asm/system.h>
index 500314f..6603ad5 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_ZD1211RW) += zd1211rw.o
 zd1211rw-objs := zd_chip.o zd_ieee80211.o \
                zd_mac.o zd_netdev.o \
                zd_rf_al2230.o zd_rf_rf2959.o \
+               zd_rf_al7230b.o \
                zd_rf.o zd_usb.o zd_util.o
 
 ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
index aa79282..7c4e32c 100644 (file)
@@ -42,12 +42,11 @@ void zd_chip_init(struct zd_chip *chip,
 
 void zd_chip_clear(struct zd_chip *chip)
 {
-       mutex_lock(&chip->mutex);
+       ZD_ASSERT(!mutex_is_locked(&chip->mutex));
        zd_usb_clear(&chip->usb);
        zd_rf_clear(&chip->rf);
-       mutex_unlock(&chip->mutex);
        mutex_destroy(&chip->mutex);
-       memset(chip, 0, sizeof(*chip));
+       ZD_MEMCLEAR(chip, sizeof(*chip));
 }
 
 static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
@@ -68,10 +67,11 @@ static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
        i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
        i += scnprintf(buffer+i, size-i, " ");
        i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
-       i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c", chip->pa_type,
+       i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c", chip->pa_type,
                chip->patch_cck_gain ? 'g' : '-',
                chip->patch_cr157 ? '7' : '-',
-               chip->patch_6m_band_edge ? '6' : '-');
+               chip->patch_6m_band_edge ? '6' : '-',
+               chip->new_phy_layout ? 'N' : '-');
        return i;
 }
 
@@ -330,13 +330,14 @@ static int read_pod(struct zd_chip *chip, u8 *rf_type)
        chip->patch_cck_gain = (value >> 8) & 0x1;
        chip->patch_cr157 = (value >> 13) & 0x1;
        chip->patch_6m_band_edge = (value >> 21) & 0x1;
+       chip->new_phy_layout = (value >> 31) & 0x1;
 
        dev_dbg_f(zd_chip_dev(chip),
                "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d "
-               "patch 6M %d\n",
+               "patch 6M %d new PHY %d\n",
                zd_rf_name(*rf_type), *rf_type,
                chip->pa_type, chip->patch_cck_gain,
-               chip->patch_cr157, chip->patch_6m_band_edge);
+               chip->patch_cr157, chip->patch_6m_band_edge, chip->new_phy_layout);
        return 0;
 error:
        *rf_type = 0;
@@ -344,6 +345,7 @@ error:
        chip->patch_cck_gain = 0;
        chip->patch_cr157 = 0;
        chip->patch_6m_band_edge = 0;
+       chip->new_phy_layout = 0;
        return r;
 }
 
@@ -717,7 +719,7 @@ static int zd1211b_hw_reset_phy(struct zd_chip *chip)
                { CR21,  0x0e }, { CR22,  0x23 }, { CR23,  0x90 },
                { CR24,  0x14 }, { CR25,  0x40 }, { CR26,  0x10 },
                { CR27,  0x10 }, { CR28,  0x7f }, { CR29,  0x80 },
-               { CR30,  0x49 }, /* jointly decoder, no ASIC */
+               { CR30,  0x4b }, /* ASIC/FWT, no jointly decoder */
                { CR31,  0x60 }, { CR32,  0x43 }, { CR33,  0x08 },
                { CR34,  0x06 }, { CR35,  0x0a }, { CR36,  0x00 },
                { CR37,  0x00 }, { CR38,  0x38 }, { CR39,  0x0c },
@@ -807,7 +809,6 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
                { CR_ACK_TIMEOUT_EXT,           0x80 },
                { CR_ADDA_PWR_DWN,              0x00 },
                { CR_ACK_TIME_80211,            0x100 },
-               { CR_IFS_VALUE,                 0x547c032 },
                { CR_RX_PE_DELAY,               0x70 },
                { CR_PS_CTRL,                   0x10000000 },
                { CR_RTS_CTS_RATE,              0x02030203 },
@@ -854,11 +855,10 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
                { CR_ACK_TIMEOUT_EXT,           0x80 },
                { CR_ADDA_PWR_DWN,              0x00 },
                { CR_ACK_TIME_80211,            0x100 },
-               { CR_IFS_VALUE,                 0x547c032 },
                { CR_RX_PE_DELAY,               0x70 },
                { CR_PS_CTRL,                   0x10000000 },
                { CR_RTS_CTS_RATE,              0x02030203 },
-               { CR_RX_THRESHOLD,              0x000c0640 },
+               { CR_RX_THRESHOLD,              0x000c0eff, },
                { CR_AFTER_PNP,                 0x1 },
                { CR_WEP_PROTECT,               0x114 },
        };
@@ -970,10 +970,15 @@ static int hw_init(struct zd_chip *chip)
        r = hw_init_hmac(chip);
        if (r)
                return r;
-       r = set_beacon_interval(chip, 100);
+
+       /* Although the vendor driver defaults to a different value during
+        * init, it overwrites the IFS value with the following every time
+        * the channel changes. We should aim to be more intelligent... */
+       r = zd_iowrite32_locked(chip, IFS_VALUE_DEFAULT, CR_IFS_VALUE);
        if (r)
                return r;
-       return 0;
+
+       return set_beacon_interval(chip, 100);
 }
 
 #ifdef DEBUG
@@ -1644,3 +1649,34 @@ int zd_rfwritev_locked(struct zd_chip *chip,
 
        return 0;
 }
+
+/*
+ * We can optionally program the RF directly through CR regs, if supported by
+ * the hardware. This is much faster than the older method.
+ */
+int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
+{
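+       /* Split the 24-bit RF word into three byte-wide CR writes:
+        * CR244 takes bits 23-16, CR243 bits 15-8, CR242 bits 7-0. */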
+       struct zd_ioreq16 ioreqs[] = {
+               { CR244, (value >> 16) & 0xff },
+               { CR243, (value >>  8) & 0xff },
+               { CR242,  value        & 0xff },
+       };
+       ZD_ASSERT(mutex_is_locked(&chip->mutex));
+       return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+int zd_rfwritev_cr_locked(struct zd_chip *chip,
+                         const u32 *values, unsigned int count)
+{
+       int r;
+       unsigned int i;
+
+       for (i = 0; i < count; i++) {
+               r = zd_rfwrite_cr_locked(chip, values[i]);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
index 069d2b4..4b12508 100644 (file)
 
 #define CR_ACK_TIMEOUT_EXT             CTL_REG(0x0690)
 #define CR_BCN_FIFO_SEMAPHORE          CTL_REG(0x0694)
+
 #define CR_IFS_VALUE                   CTL_REG(0x0698)
+#define IFS_VALUE_DIFS_SH              0
+#define IFS_VALUE_EIFS_SH              12
+#define IFS_VALUE_SIFS_SH              24
+#define IFS_VALUE_DEFAULT              ((  50 << IFS_VALUE_DIFS_SH) | \
+                                        (1148 << IFS_VALUE_EIFS_SH) | \
+                                        (  10 << IFS_VALUE_SIFS_SH))
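+/* The default packs DIFS=50, EIFS=1148 and SIFS=10 into one register;
+ * the values appear to be microseconds, matching 802.11b/g interframe
+ * spacing (SIFS 10 us, DIFS 50 us). */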
+
 #define CR_RX_TIME_OUT                 CTL_REG(0x069C)
 #define CR_TOTAL_RX_FRM                        CTL_REG(0x06A0)
 #define CR_CRC32_CNT                   CTL_REG(0x06A4)
@@ -630,6 +638,7 @@ enum {
        LOAD_CODE_SIZE                  = 0xe, /* words */
        LOAD_VECT_SIZE                  = 0x10000 - 0xfff7, /* words */
        EEPROM_REGS_OFFSET              = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
+       EEPROM_REGS_SIZE                = 0x7e, /* words */
        E2P_BASE_OFFSET                 = EEPROM_START_OFFSET +
                                          EEPROM_REGS_OFFSET,
 };
@@ -655,7 +664,7 @@ struct zd_chip {
        /* SetPointOFDM in the vendor driver */
        u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT];
        u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
-          is_zd1211b:1;
+          new_phy_layout:1, is_zd1211b:1;
 };
 
 static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
@@ -739,8 +748,12 @@ static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits)
        return zd_usb_rfwrite(&chip->usb, value, bits);
 }
 
+int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value);
+
 int zd_rfwritev_locked(struct zd_chip *chip,
                       const u32* values, unsigned int count, u8 bits);
+int zd_rfwritev_cr_locked(struct zd_chip *chip,
+                         const u32* values, unsigned int count);
 
 /* Locking functions for reading and writing registers.
  * The different parameters are intentional.
index 4659068..a13ec72 100644 (file)
@@ -45,4 +45,10 @@ do { \
 #  define ZD_ASSERT(x) do { } while (0)
 #endif
 
+#ifdef DEBUG
+#  define ZD_MEMCLEAR(pointer, size) memset((pointer), 0xff, (size))
+#else
+#  define ZD_MEMCLEAR(pointer, size) do { } while (0)
+#endif
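+/* In debug builds ZD_MEMCLEAR poisons the cleared area with 0xff so that
+ * use-after-clear bugs surface quickly; otherwise it compiles to nothing. */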
+
 #endif /* _ZD_DEF_H */
index 3632989..f63245b 100644 (file)
@@ -64,7 +64,7 @@ struct cck_plcp_header {
        u8 service;
        __le16 length;
        __le16 crc16;
-} __attribute__((packed));
+};
 
 static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header)
 {
index a9bd80a..1989f1c 100644 (file)
@@ -127,11 +127,9 @@ out:
 
 void zd_mac_clear(struct zd_mac *mac)
 {
-       /* Aquire the lock. */
-       spin_lock(&mac->lock);
-       spin_unlock(&mac->lock);
        zd_chip_clear(&mac->chip);
-       memset(mac, 0, sizeof(*mac));
+       ZD_ASSERT(!spin_is_locked(&mac->lock));
+       ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
 }
 
 static int reset_mode(struct zd_mac *mac)
@@ -716,7 +714,7 @@ struct zd_rt_hdr {
        u8  rt_rate;
        u16 rt_channel;
        u16 rt_chbitmask;
-} __attribute__((packed));
+};
 
 static void fill_rt_header(void *buffer, struct zd_mac *mac,
                           const struct ieee80211_rx_stats *stats,
index b3ba49b..29b51fd 100644 (file)
@@ -82,7 +82,7 @@ struct zd_ctrlset {
 struct rx_length_info {
        __le16 length[3];
        __le16 tag;
-} __attribute__((packed));
+};
 
 #define RX_LENGTH_INFO_TAG             0x697e
 
@@ -93,7 +93,7 @@ struct rx_status {
        u8 signal_quality_ofdm;
        u8 decryption_type;
        u8 frame_status;
-} __attribute__((packed));
+};
 
 /* rx_status field decryption_type */
 #define ZD_RX_NO_WEP   0
@@ -123,9 +123,9 @@ enum mac_flags {
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
 struct zd_mac {
-       struct net_device *netdev;
        struct zd_chip chip;
        spinlock_t lock;
+       struct net_device *netdev;
        /* Unlocked reading possible */
        struct iw_statistics iw_stats;
        unsigned int stats_count;
index 9df232c..440ef24 100644 (file)
@@ -72,10 +72,18 @@ static int iw_get_name(struct net_device *netdev,
                       struct iw_request_info *info,
                       union iwreq_data *req, char *extra)
 {
-       /* FIXME: check whether 802.11a will also supported, add also
-        *        zd1211B, if we support it.
-        */
-       strlcpy(req->name, "802.11g zd1211", IFNAMSIZ);
+       /* FIXME: check whether 802.11a will also be supported */
+       strlcpy(req->name, "IEEE 802.11b/g", IFNAMSIZ);
+       return 0;
+}
+
+static int iw_get_nick(struct net_device *netdev,
+                      struct iw_request_info *info,
+                      union iwreq_data *req, char *extra)
+{
+       strcpy(extra, "zd1211");
+       req->data.length = strlen(extra) + 1;
+       req->data.flags = 1;
        return 0;
 }
 
@@ -181,6 +189,7 @@ static int iw_get_encodeext(struct net_device *netdev,
 
 static const iw_handler zd_standard_iw_handlers[] = {
        WX(SIOCGIWNAME)         = iw_get_name,
+       WX(SIOCGIWNICKN)        = iw_get_nick,
        WX(SIOCSIWFREQ)         = iw_set_freq,
        WX(SIOCGIWFREQ)         = iw_get_freq,
        WX(SIOCSIWMODE)         = iw_set_mode,
index d3770d2..f50cff3 100644 (file)
@@ -56,7 +56,7 @@ void zd_rf_init(struct zd_rf *rf)
 
 void zd_rf_clear(struct zd_rf *rf)
 {
-       memset(rf, 0, sizeof(*rf));
+       ZD_MEMCLEAR(rf, sizeof(*rf));
 }
 
 int zd_rf_init_hw(struct zd_rf *rf, u8 type)
@@ -76,6 +76,11 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
                if (r)
                        return r;
                break;
+       case AL7230B_RF:
+               r = zd_rf_init_al7230b(rf);
+               if (r)
+                       return r;
+               break;
        default:
                dev_err(zd_chip_dev(chip),
                        "RF %s %#x is not supported\n", zd_rf_name(type), type);
index ea30f69..676b373 100644 (file)
@@ -78,5 +78,6 @@ int zd_switch_radio_off(struct zd_rf *rf);
 
 int zd_rf_init_rf2959(struct zd_rf *rf);
 int zd_rf_init_al2230(struct zd_rf *rf);
+int zd_rf_init_al7230b(struct zd_rf *rf);
 
 #endif /* _ZD_RF_H */
index 0948b25..25323a1 100644 (file)
@@ -21,7 +21,7 @@
 #include "zd_usb.h"
 #include "zd_chip.h"
 
-static const u32 al2230_table[][3] = {
+static const u32 zd1211_al2230_table[][3] = {
        RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
        RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
        RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, },
@@ -38,6 +38,53 @@ static const u32 al2230_table[][3] = {
        RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, },
 };
 
+static const u32 zd1211b_al2230_table[][3] = {
+       RF_CHANNEL( 1) = { 0x09efc0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL( 2) = { 0x09efc0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL( 3) = { 0x09e7c0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL( 4) = { 0x09e7c0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL( 5) = { 0x05efc0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL( 6) = { 0x05efc0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL( 7) = { 0x05e7c0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL( 8) = { 0x05e7c0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL( 9) = { 0x0defc0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL(10) = { 0x0defc0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL(11) = { 0x0de7c0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL(12) = { 0x0de7c0, 0x8cccd0, 0xb00000, },
+       RF_CHANNEL(13) = { 0x03efc0, 0x8cccc0, 0xb00000, },
+       RF_CHANNEL(14) = { 0x03e7c0, 0x866660, 0xb00000, },
+};
+
+static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = {
+       { CR240, 0x57 }, { CR9,   0xe0 },
+};
+
+static int zd1211b_al2230_finalize_rf(struct zd_chip *chip)
+{
+       int r;
+       static const struct zd_ioreq16 ioreqs[] = {
+               { CR80,  0x30 }, { CR81,  0x30 }, { CR79,  0x58 },
+               { CR12,  0xf0 }, { CR77,  0x1b }, { CR78,  0x58 },
+               { CR203, 0x06 },
+               { },
+
+               { CR240, 0x80 },
+       };
+
+       r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+       if (r)
+               return r;
+
+       /* related to antenna selection? */
+       if (chip->new_phy_layout) {
+               r = zd_iowrite16_locked(chip, 0xe1, CR9);
+               if (r)
+                       return r;
+       }
+
+       return zd_iowrite16_locked(chip, 0x06, CR203);
+}
+
 static int zd1211_al2230_init_hw(struct zd_rf *rf)
 {
        int r;
@@ -139,7 +186,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
                { CR47,  0x1e },
 
                /* ZD1211B 05.06.10 */
-               { CR48,  0x00 }, { CR49,  0x00 }, { CR51,  0x01 },
+               { CR48,  0x06 }, { CR49,  0xf9 }, { CR51,  0x01 },
                { CR52,  0x80 }, { CR53,  0x7e }, { CR65,  0x00 },
                { CR66,  0x00 }, { CR67,  0x00 }, { CR68,  0x00 },
                { CR69,  0x28 },
@@ -172,79 +219,78 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
                { CR137, 0x50 }, /* 5614 */
                { CR138, 0xa8 },
                { CR144, 0xac }, /* 5621 */
-               { CR150, 0x0d }, { CR252, 0x00 }, { CR253, 0x00 },
+               { CR150, 0x0d }, { CR252, 0x34 }, { CR253, 0x34 },
        };
 
        static const u32 rv1[] = {
-               /* channel 1 */
-               0x03f790,
-               0x033331,
-               0x00000d,
-
-               0x0b3331,
-               0x03b812,
-               0x00fff3,
-               0x0005a4,
-               0x0f4dc5, /* fix freq shift 0x044dc5 */
-               0x0805b6,
-               0x0146c7,
-               0x000688,
-               0x0403b9, /* External control TX power (CR31) */
-               0x00dbba,
-               0x00099b,
-               0x0bdffc,
-               0x00000d,
-               0x00580f,
+               0x8cccd0,
+               0x481dc0,
+               0xcfff00,
+               0x25a000,
+
+               /* To improve AL2230 yield, improve phase noise, 4713 */
+               0x25a000,
+               0xa3b2f0,
+
+               0x6da010, /* Reg6 update for MP version */
+               0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */
+               0x116000,
+               0x9dc020, /* External control TX power (CR31) */
+               0x5ddb00, /* RegA update for MP version */
+               0xd99000, /* RegB update for MP version */
+               0x3ffbd0, /* RegC update for MP version */
+               0xb00000, /* RegD update for MP version */
+
+               /* improve phase noise and remove phase calibration,4713 */
+               0xf01a00,
        };
 
        static const struct zd_ioreq16 ioreqs2[] = {
-               { CR47,  0x1e }, { CR_RFCFG, 0x03 },
+               { CR251, 0x2f }, /* shdnb(PLL_ON)=0 */
+               { CR251, 0x7f }, /* shdnb(PLL_ON)=1 */
        };
 
        static const u32 rv2[] = {
-               0x00880f,
-               0x00080f,
+               /* To improve AL2230 yield, 4713 */
+               0xf01b00,
+               0xf01e00,
+               0xf01a00,
        };
 
        static const struct zd_ioreq16 ioreqs3[] = {
-               { CR_RFCFG, 0x00 }, { CR47, 0x1e }, { CR251, 0x7f },
-       };
-
-       static const u32 rv3[] = {
-               0x00d80f,
-               0x00780f,
-               0x00580f,
-       };
-
-       static const struct zd_ioreq16 ioreqs4[] = {
-               { CR138, 0x28 }, { CR203, 0x06 },
+               /* related to 6M band edge patching, happens unconditionally */
+               { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
        };
 
+       r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1,
+               ARRAY_SIZE(zd1211b_ioreqs_shared_1));
+       if (r)
+               return r;
        r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1));
        if (r)
                return r;
-       r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS);
+       r = zd_rfwritev_cr_locked(chip, zd1211b_al2230_table[0], 3);
        if (r)
                return r;
-       r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2));
+       r = zd_rfwritev_cr_locked(chip, rv1, ARRAY_SIZE(rv1));
        if (r)
                return r;
-       r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS);
+       r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2));
        if (r)
                return r;
-       r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3));
+       r = zd_rfwritev_cr_locked(chip, rv2, ARRAY_SIZE(rv2));
        if (r)
                return r;
-       r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS);
+       r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3));
        if (r)
                return r;
-       return zd_iowrite16a_locked(chip, ioreqs4, ARRAY_SIZE(ioreqs4));
+       return zd1211b_al2230_finalize_rf(chip);
 }
 
-static int al2230_set_channel(struct zd_rf *rf, u8 channel)
+static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel)
 {
        int r;
-       const u32 *rv = al2230_table[channel-1];
+       const u32 *rv = zd1211_al2230_table[channel-1];
        struct zd_chip *chip = zd_rf_to_chip(rf);
        static const struct zd_ioreq16 ioreqs[] = {
                { CR138, 0x28 },
@@ -257,6 +303,24 @@ static int al2230_set_channel(struct zd_rf *rf, u8 channel)
        return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
 }
 
+static int zd1211b_al2230_set_channel(struct zd_rf *rf, u8 channel)
+{
+       int r;
+       const u32 *rv = zd1211b_al2230_table[channel-1];
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+
+       r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1,
+               ARRAY_SIZE(zd1211b_ioreqs_shared_1));
+       if (r)
+               return r;
+
+       r = zd_rfwritev_cr_locked(chip, rv, 3);
+       if (r)
+               return r;
+
+       return zd1211b_al2230_finalize_rf(chip);
+}
+
 static int zd1211_al2230_switch_radio_on(struct zd_rf *rf)
 {
        struct zd_chip *chip = zd_rf_to_chip(rf);
@@ -294,13 +358,14 @@ int zd_rf_init_al2230(struct zd_rf *rf)
 {
        struct zd_chip *chip = zd_rf_to_chip(rf);
 
-       rf->set_channel = al2230_set_channel;
        rf->switch_radio_off = al2230_switch_radio_off;
        if (chip->is_zd1211b) {
                rf->init_hw = zd1211b_al2230_init_hw;
+               rf->set_channel = zd1211b_al2230_set_channel;
                rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
        } else {
                rf->init_hw = zd1211_al2230_init_hw;
+               rf->set_channel = zd1211_al2230_set_channel;
                rf->switch_radio_on = zd1211_al2230_switch_radio_on;
        }
        rf->patch_6m_band_edge = 1;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
new file mode 100644 (file)
index 0000000..a289f95
--- /dev/null
@@ -0,0 +1,274 @@
+/* zd_rf_al7230b.c: Functions for the AL7230B RF controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include "zd_rf.h"
+#include "zd_usb.h"
+#include "zd_chip.h"
+
+static const u32 chan_rv[][2] = {
+       RF_CHANNEL( 1) = { 0x09ec00, 0x8cccc8 },
+       RF_CHANNEL( 2) = { 0x09ec00, 0x8cccd8 },
+       RF_CHANNEL( 3) = { 0x09ec00, 0x8cccc0 },
+       RF_CHANNEL( 4) = { 0x09ec00, 0x8cccd0 },
+       RF_CHANNEL( 5) = { 0x05ec00, 0x8cccc8 },
+       RF_CHANNEL( 6) = { 0x05ec00, 0x8cccd8 },
+       RF_CHANNEL( 7) = { 0x05ec00, 0x8cccc0 },
+       RF_CHANNEL( 8) = { 0x05ec00, 0x8cccd0 },
+       RF_CHANNEL( 9) = { 0x0dec00, 0x8cccc8 },
+       RF_CHANNEL(10) = { 0x0dec00, 0x8cccd8 },
+       RF_CHANNEL(11) = { 0x0dec00, 0x8cccc0 },
+       RF_CHANNEL(12) = { 0x0dec00, 0x8cccd0 },
+       RF_CHANNEL(13) = { 0x03ec00, 0x8cccc8 },
+       RF_CHANNEL(14) = { 0x03ec00, 0x866660 },
+};
+
+static const u32 std_rv[] = {
+       0x4ff821,
+       0xc5fbfc,
+       0x21ebfe,
+       0xafd401, /* freq shift 0xaad401 */
+       0x6cf56a,
+       0xe04073,
+       0x193d76,
+       0x9dd844,
+       0x500007,
+       0xd8c010,
+};
+
+static int al7230b_init_hw(struct zd_rf *rf)
+{
+       int i, r;
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+
+       /* All of these writes are identical to AL2230 unless otherwise
+        * specified */
+       static const struct zd_ioreq16 ioreqs_1[] = {
+               /* This one is 7230-specific, and happens before the rest */
+               { CR240,  0x57 },
+               { },
+
+               { CR15,   0x20 }, { CR23,   0x40 }, { CR24,  0x20 },
+               { CR26,   0x11 }, { CR28,   0x3e }, { CR29,  0x00 },
+               { CR44,   0x33 },
+               /* This value is different for 7230 (was: 0x2a) */
+               { CR106,  0x22 },
+               { CR107,  0x1a }, { CR109,  0x09 }, { CR110,  0x27 },
+               { CR111,  0x2b }, { CR112,  0x2b }, { CR119,  0x0a },
+               /* This happened further down in AL2230,
+                * and the value changed (was: 0xe0) */
+               { CR122,  0xfc },
+               { CR10,   0x89 },
+               /* for newest (3rd cut) AL2300 */
+               { CR17,   0x28 },
+               { CR26,   0x93 }, { CR34,   0x30 },
+               /* for newest (3rd cut) AL2300 */
+               { CR35,   0x3e },
+               { CR41,   0x24 }, { CR44,   0x32 },
+               /* for newest (3rd cut) AL2300 */
+               { CR46,   0x96 },
+               { CR47,   0x1e }, { CR79,   0x58 }, { CR80,  0x30 },
+               { CR81,   0x30 }, { CR87,   0x0a }, { CR89,  0x04 },
+               { CR92,   0x0a }, { CR99,   0x28 },
+               /* This value is different for 7230 (was: 0x00) */
+               { CR100,  0x02 },
+               { CR101,  0x13 }, { CR102,  0x27 },
+               /* This value is different for 7230 (was: 0x24) */
+               { CR106,  0x22 },
+               /* This value is different for 7230 (was: 0x2a) */
+               { CR107,  0x3f },
+               { CR109,  0x09 },
+               /* This value is different for 7230 (was: 0x13) */
+               { CR110,  0x1f },
+               { CR111,  0x1f }, { CR112,  0x1f }, { CR113, 0x27 },
+               { CR114,  0x27 },
+               /* for newest (3rd cut) AL2300 */
+               { CR115,  0x24 },
+               /* This value is different for 7230 (was: 0x24) */
+               { CR116,  0x3f },
+               /* This value is different for 7230 (was: 0xf4) */
+               { CR117,  0xfa },
+               { CR118,  0xfc }, { CR119,  0x10 }, { CR120, 0x4f },
+               { CR121,  0x77 }, { CR137,  0x88 },
+               /* This one is 7230-specific */
+               { CR138,  0xa8 },
+               /* This value is different for 7230 (was: 0xff) */
+               { CR252,  0x34 },
+               /* This value is different for 7230 (was: 0xff) */
+               { CR253,  0x34 },
+
+               /* PLL_OFF */
+               { CR251, 0x2f },
+       };
+
+       static const struct zd_ioreq16 ioreqs_2[] = {
+               /* PLL_ON */
+               { CR251, 0x3f },
+               { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
+               { CR38, 0x38 }, { CR136, 0xdf },
+       };
+
+       r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
+       if (r)
+               return r;
+
+       r = zd_rfwrite_cr_locked(chip, 0x09ec04);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0x8cccc8);
+       if (r)
+               return r;
+
+       for (i = 0; i < ARRAY_SIZE(std_rv); i++) {
+               r = zd_rfwrite_cr_locked(chip, std_rv[i]);
+               if (r)
+                       return r;
+       }
+
+       r = zd_rfwrite_cr_locked(chip, 0x3c9000);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0xbfffff);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0x700000);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0xf15d58);
+       if (r)
+               return r;
+
+       r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2));
+       if (r)
+               return r;
+
+       r = zd_rfwrite_cr_locked(chip, 0xf15d59);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0xf15d5c);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0xf15d58);
+       if (r)
+               return r;
+
+       r = zd_iowrite16_locked(chip, 0x06, CR203);
+       if (r)
+               return r;
+       r = zd_iowrite16_locked(chip, 0x80, CR240);
+       if (r)
+               return r;
+
+       return 0;
+}
+
+static int al7230b_set_channel(struct zd_rf *rf, u8 channel)
+{
+       int i, r;
+       const u32 *rv = chan_rv[channel-1];
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+
+       struct zd_ioreq16 ioreqs_1[] = {
+               { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
+               { CR38,  0x38 }, { CR136, 0xdf },
+       };
+
+       struct zd_ioreq16 ioreqs_2[] = {
+               /* PLL_ON */
+               { CR251, 0x3f },
+               { CR203, 0x06 }, { CR240, 0x08 },
+       };
+
+       r = zd_iowrite16_locked(chip, 0x57, CR240);
+       if (r)
+               return r;
+
+       /* PLL_OFF */
+       r = zd_iowrite16_locked(chip, 0x2f, CR251);
+       if (r)
+               return r;
+
+       for (i = 0; i < ARRAY_SIZE(std_rv); i++) {
+               r = zd_rfwrite_cr_locked(chip, std_rv[i]);
+               if (r)
+                       return r;
+       }
+
+       r = zd_rfwrite_cr_locked(chip, 0x3c9000);
+       if (r)
+               return r;
+       r = zd_rfwrite_cr_locked(chip, 0xf15d58);
+       if (r)
+               return r;
+
+       r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
+       if (r)
+               return r;
+
+       for (i = 0; i < 2; i++) {
+               r = zd_rfwrite_cr_locked(chip, rv[i]);
+               if (r)
+                       return r;
+       }
+
+       r = zd_rfwrite_cr_locked(chip, 0x3c9000);
+       if (r)
+               return r;
+
+       return zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2));
+}
+
+static int al7230b_switch_radio_on(struct zd_rf *rf)
+{
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+       static const struct zd_ioreq16 ioreqs[] = {
+               { CR11,  0x00 },
+               { CR251, 0x3f },
+       };
+
+       return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+static int al7230b_switch_radio_off(struct zd_rf *rf)
+{
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+       static const struct zd_ioreq16 ioreqs[] = {
+               { CR11,  0x04 },
+               { CR251, 0x2f },
+       };
+
+       return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
+
+int zd_rf_init_al7230b(struct zd_rf *rf)
+{
+       struct zd_chip *chip = zd_rf_to_chip(rf);
+
+       if (chip->is_zd1211b) {
+               dev_err(zd_chip_dev(chip), "AL7230B is currently not "
+                       "supported for ZD1211B devices\n");
+               return -ENODEV;
+       }
+
+       rf->init_hw = al7230b_init_hw;
+       rf->set_channel = al7230b_set_channel;
+       rf->switch_radio_on = al7230b_switch_radio_on;
+       rf->switch_radio_off = al7230b_switch_radio_off;
+       rf->patch_6m_band_edge = 1;
+       return 0;
+}
index 6320984..31027e5 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <asm/unaligned.h>
+#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/firmware.h>
@@ -39,9 +40,19 @@ static struct usb_device_id usb_ids[] = {
        { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 },
        /* ZD1211B */
        { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
+       { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
+       { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
+       /* "Driverless" devices that need ejecting */
+       { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
        {}
 };
 
@@ -263,6 +274,39 @@ static char *get_fw_name(char *buffer, size_t size, u8 device_type,
        return buffer;
 }
 
+static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
+       const struct firmware *ub_fw)
+{
+       const struct firmware *ur_fw = NULL;
+       int offset;
+       int r = 0;
+       char fw_name[128];
+
+       r = request_fw_file(&ur_fw,
+               get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"),
+               &udev->dev);
+       if (r)
+               goto error;
+
+       r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START_OFFSET,
+               REBOOT);
+       if (r)
+               goto error;
+
+       offset = ((EEPROM_REGS_OFFSET + EEPROM_REGS_SIZE) * sizeof(u16));
+       r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
+               E2P_BASE_OFFSET + EEPROM_REGS_SIZE, REBOOT);
+
+       /* At this point, the vendor driver downloads the whole firmware
+        * image, hacks around with version IDs, and uploads it again,
+        * completely overwriting the boot code. We do not do this here as
+        * it is not required on any tested devices, and it is suspected to
+        * cause problems. */
+error:
+       release_firmware(ur_fw);
+       return r;
+}
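The byte offset computed above skips the EEPROM register words that sit at the head of the "ub" firmware image: get_word() appears to index the image in 16-bit words while upload_code() takes a byte offset, hence the sizeof(u16) factor. A minimal sketch of that arithmetic, using made-up word counts (the real EEPROM_REGS_OFFSET / EEPROM_REGS_SIZE constants live in the zd1211rw headers and are not shown in this hunk):

static inline size_t ub_image_skip_bytes_example(void)
{
	/* Hypothetical values, purely illustrative */
	const size_t eeprom_regs_offset = 0x10;	/* in 16-bit words */
	const size_t eeprom_regs_size   = 0x20;	/* in 16-bit words */

	/* (0x10 + 0x20) words * 2 bytes/word = 0x60 bytes skipped from
	 * ub_fw->data; the remainder is uploaded starting at
	 * E2P_BASE_OFFSET + EEPROM_REGS_SIZE. */
	return (eeprom_regs_offset + eeprom_regs_size) * sizeof(u16);
}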
+
 static int upload_firmware(struct usb_device *udev, u8 device_type)
 {
        int r;
@@ -282,15 +326,17 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
 
        fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET);
 
-       /* FIXME: do we have any reason to perform the kludge that the vendor
-        * driver does when there is a version mismatch? (their driver uploads
-        * different firmwares and stuff)
-        */
        if (fw_bcdDevice != bcdDevice) {
                dev_info(&udev->dev,
-                       "firmware device id %#06x and actual device id "
-                       "%#06x differ, continuing anyway\n",
-                       fw_bcdDevice, bcdDevice);
+                       "firmware version %#06x and device bootcode version "
+                       "%#06x differ\n", fw_bcdDevice, bcdDevice);
+               if (bcdDevice <= 0x4313)
+                       dev_warn(&udev->dev, "device has old bootcode, please "
+                               "report success or failure\n");
+
+               r = handle_version_mismatch(udev, device_type, ub_fw);
+               if (r)
+                       goto error;
        } else {
                dev_dbg_f(&udev->dev,
                        "firmware device id %#06x is equal to the "
@@ -620,7 +666,7 @@ resubmit:
        usb_submit_urb(urb, GFP_ATOMIC);
 }
 
-struct urb *alloc_urb(struct zd_usb *usb)
+static struct urb *alloc_urb(struct zd_usb *usb)
 {
        struct usb_device *udev = zd_usb_to_usbdev(usb);
        struct urb *urb;
@@ -644,7 +690,7 @@ struct urb *alloc_urb(struct zd_usb *usb)
        return urb;
 }
 
-void free_urb(struct urb *urb)
+static void free_urb(struct urb *urb)
 {
        if (!urb)
                return;
@@ -864,7 +910,7 @@ void zd_usb_clear(struct zd_usb *usb)
 {
        usb_set_intfdata(usb->intf, NULL);
        usb_put_intf(usb->intf);
-       memset(usb, 0, sizeof(*usb));
+       ZD_MEMCLEAR(usb, sizeof(*usb));
        /* FIXME: usb_interrupt, usb_tx, usb_rx? */
 }
 
@@ -910,6 +956,55 @@ static void print_id(struct usb_device *udev)
 #define print_id(udev) do { } while (0)
 #endif
 
+static int eject_installer(struct usb_interface *intf)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct usb_host_interface *iface_desc = &intf->altsetting[0];
+       struct usb_endpoint_descriptor *endpoint;
+       unsigned char *cmd;
+       u8 bulk_out_ep;
+       int r;
+
+       /* Find bulk out endpoint */
+       endpoint = &iface_desc->endpoint[1].desc;
+       if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
+           (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+           USB_ENDPOINT_XFER_BULK) {
+               bulk_out_ep = endpoint->bEndpointAddress;
+       } else {
+               dev_err(&udev->dev,
+                       "zd1211rw: Could not find bulk out endpoint\n");
+               return -ENODEV;
+       }
+
+       cmd = kzalloc(31, GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENODEV;
+
+       /* USB bulk command block */
+       cmd[0] = 0x55;  /* bulk command signature */
+       cmd[1] = 0x53;  /* bulk command signature */
+       cmd[2] = 0x42;  /* bulk command signature */
+       cmd[3] = 0x43;  /* bulk command signature */
+       cmd[14] = 6;    /* command length */
+
+       cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+       cmd[19] = 0x2;  /* eject disc */
+
+       dev_info(&udev->dev, "Ejecting virtual installer media...\n");
+       r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+               cmd, 31, NULL, 2000);
+       kfree(cmd);
+       if (r)
+               return r;
+
+       /* At this point, the device disconnects and reconnects with the real
+        * ID numbers. */
+
+       usb_set_intfdata(intf, NULL);
+       return 0;
+}
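For reference, the 31-byte buffer built above follows the USB mass-storage Bulk-Only Transport Command Block Wrapper layout, carrying a six-byte SCSI START STOP UNIT CDB. The struct below is purely illustrative and is not a type used by this driver; it only names the fields that the raw cmd[] bytes fill:

struct eject_cbw_sketch {
	__le32 signature;		/* cmd[0..3]: 'U' 'S' 'B' 'C' (0x43425355 LE) */
	__le32 tag;			/* left zero here */
	__le32 data_transfer_length;	/* no data stage, left zero */
	__u8   flags;
	__u8   lun;
	__u8   cb_length;		/* cmd[14] = 6: six-byte CDB follows */
	__u8   cdb[16];			/* cmd[15] = 0x1b (START STOP UNIT),
					 * cmd[19] = CDB byte 4 = 0x02,
					 * i.e. LoEj=1/Start=0: eject the medium */
} __attribute__((packed));		/* 31 bytes, matching kzalloc(31, ...) */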
+
 static int probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
        int r;
@@ -918,6 +1013,9 @@ static int probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        print_id(udev);
 
+       if (id->driver_info & DEVICE_INSTALLER)
+               return eject_installer(intf);
+
        switch (udev->speed) {
        case USB_SPEED_LOW:
        case USB_SPEED_FULL:
@@ -983,6 +1081,11 @@ static void disconnect(struct usb_interface *intf)
        struct zd_mac *mac = zd_netdev_mac(netdev);
        struct zd_usb *usb = &mac->chip.usb;
 
+       /* Either something really bad happened, or we're just dealing with
+        * a DEVICE_INSTALLER. */
+       if (netdev == NULL)
+               return;
+
        dev_dbg_f(zd_usb_dev(usb), "\n");
 
        zd_netdev_disconnect(netdev);
@@ -998,7 +1101,6 @@ static void disconnect(struct usb_interface *intf)
         */
        usb_reset_device(interface_to_usbdev(intf));
 
-       /* If somebody still waits on this lock now, this is an error. */
        zd_netdev_free(netdev);
        dev_dbg(&intf->dev, "disconnected\n");
 }
index d642028..ded39de 100644 (file)
@@ -30,6 +30,7 @@
 enum devicetype {
        DEVICE_ZD1211  = 0,
        DEVICE_ZD1211B = 1,
+       DEVICE_INSTALLER = 2,
 };
 
 enum endpoints {
@@ -73,17 +74,17 @@ enum control_requests {
 struct usb_req_read_regs {
        __le16 id;
        __le16 addr[0];
-} __attribute__((packed));
+};
 
 struct reg_data {
        __le16 addr;
        __le16 value;
-} __attribute__((packed));
+};
 
 struct usb_req_write_regs {
        __le16 id;
        struct reg_data reg_writes[0];
-} __attribute__((packed));
+};
 
 enum {
        RF_IF_LE = 0x02,
@@ -100,7 +101,7 @@ struct usb_req_rfwrite {
        /* RF2595: 24 */
        __le16 bit_values[0];
        /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
-} __attribute__((packed));
+};
 
 /* USB interrupt */
 
@@ -117,12 +118,12 @@ enum usb_int_flags {
 struct usb_int_header {
        u8 type;        /* must always be 1 */
        u8 id;
-} __attribute__((packed));
+};
 
 struct usb_int_regs {
        struct usb_int_header hdr;
        struct reg_data regs[0];
-} __attribute__((packed));
+};
 
 struct usb_int_retry_fail {
        struct usb_int_header hdr;
@@ -130,7 +131,7 @@ struct usb_int_retry_fail {
        u8 _dummy;
        u8 addr[ETH_ALEN];
        u8 ibss_wakeup_dest;
-} __attribute__((packed));
+};
 
 struct read_regs_int {
        struct completion completion;
index 8459a18..8746cc7 100644 (file)
@@ -24,8 +24,8 @@
 */
 
 #define DRV_NAME       "yellowfin"
-#define DRV_VERSION    "2.0"
-#define DRV_RELDATE    "Jun 27, 2006"
+#define DRV_VERSION    "2.1"
+#define DRV_RELDATE    "Sep 11, 2006"
 
 #define PFX DRV_NAME ": "
 
@@ -1307,8 +1307,6 @@ static void set_rx_mode(struct net_device *dev)
        /* Stop the Rx process to change any value. */
        iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
-               /* Unconditionally log net taps. */
-               printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
                iowrite16(0x000F, ioaddr + AddrMode);
        } else if ((dev->mc_count > 64)  ||  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter well, or accept all multicasts. */
@@ -1434,7 +1432,7 @@ static int __init yellowfin_init (void)
 #ifdef MODULE
        printk(version);
 #endif
-       return pci_module_init (&yellowfin_driver);
+       return pci_register_driver(&yellowfin_driver);
 }
 
 
index ecc4286..b174ebb 100644 (file)
@@ -240,6 +240,11 @@ struct ieee80211_snap_hdr {
 #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
 #define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
 
+/* 802.11g ERP information element */
+#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
+#define WLAN_ERP_USE_PROTECTION (1<<1)
+#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
+
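As a rough guide to these bits: bit 0 set means non-ERP (802.11b) stations are present in the BSS, bit 1 set asks ERP stations to protect their OFDM transmissions (e.g. with self-CTS), and bit 2 set forbids short Barker preambles. A hypothetical decode helper, not part of the ieee80211 API:

static inline void wlan_erp_decode_example(u8 erp_value)
{
	int nonerp_present = !!(erp_value & WLAN_ERP_NON_ERP_PRESENT);
	int use_protection = !!(erp_value & WLAN_ERP_USE_PROTECTION);
	int long_preamble  = !!(erp_value & WLAN_ERP_BARKER_PREAMBLE);

	/* e.g. erp_value == 0x03: 802.11b stations are around and OFDM
	 * frames should be protected; bit 2 clear, so short preambles
	 * are not forbidden by the BSS. */
	printk(KERN_DEBUG "ERP: non_erp=%d protection=%d long_preamble=%d\n",
	       nonerp_present, use_protection, long_preamble);
}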
 /* Status codes */
 enum ieee80211_statuscode {
        WLAN_STATUS_SUCCESS = 0,
@@ -747,6 +752,8 @@ struct ieee80211_txb {
 #define NETWORK_HAS_IBSS_DFS            (1<<8)
 #define NETWORK_HAS_TPC_REPORT          (1<<9)
 
+#define NETWORK_HAS_ERP_VALUE           (1<<10)
+
 #define QOS_QUEUE_NUM                   4
 #define QOS_OUI_LEN                     3
 #define QOS_OUI_TYPE                    2
@@ -1252,6 +1259,8 @@ extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
                              int total_len, int encrypt_mpdu);
 
 /* ieee80211_rx.c */
+extern void ieee80211_rx_any(struct ieee80211_device *ieee,
+                    struct sk_buff *skb, struct ieee80211_rx_stats *stats);
 extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                        struct ieee80211_rx_stats *rx_stats);
 /* make sure to set stats->len */
index 00ad810..425b3a5 100644 (file)
@@ -86,9 +86,6 @@ struct ieee80211softmac_assoc_info {
        
        /* BSSID we're trying to associate to */
        char bssid[ETH_ALEN];
-
-       /* Rates supported by the network */
-       struct ieee80211softmac_ratesinfo supported_rates;
        
        /* some flags.
         * static_essid is valid if the essid is constant,
@@ -103,6 +100,7 @@ struct ieee80211softmac_assoc_info {
         * bssfixed is used for SIOCSIWAP.
         */
        u8 static_essid:1,
+          short_preamble_available:1,
           associating:1,
           assoc_wait:1,
           bssvalid:1,
@@ -115,6 +113,19 @@ struct ieee80211softmac_assoc_info {
        struct work_struct timeout;
 };
 
+struct ieee80211softmac_bss_info {
+       /* Rates supported by the network */
+       struct ieee80211softmac_ratesinfo supported_rates;
+
+       /* This indicates whether frames can currently be transmitted with
+        * short preamble (only use this variable during TX at CCK rates) */
+       u8 short_preamble:1;
+
+       /* This indicates whether protection (e.g. self-CTS) should be used
+        * when transmitting with OFDM modulation */
+       u8 use_protection:1;
+};
+
 enum {
        IEEE80211SOFTMAC_AUTH_OPEN_REQUEST      = 1,
        IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE     = 2,
@@ -157,6 +168,10 @@ struct ieee80211softmac_txrates {
 #define IEEE80211SOFTMAC_TXRATECHG_MCAST               (1 << 2) /* mcast_rate */
 #define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST           (1 << 3) /* mgt_mcast_rate */
 
+#define IEEE80211SOFTMAC_BSSINFOCHG_RATES              (1 << 0) /* supported_rates */
+#define IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE     (1 << 1) /* short_preamble */
+#define IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION         (1 << 2) /* use_protection */
+
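A hypothetical driver-side sketch of how these change flags are meant to be consumed, via the bssinfo_change callback added to struct ieee80211softmac_device below (the mydrv_* names are made up):

static void mydrv_bssinfo_change(struct net_device *dev, u32 changes)
{
	struct ieee80211softmac_device *mac = ieee80211_priv(dev);

	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE)
		printk(KERN_DEBUG "%s: short preamble now %d\n",
		       dev->name, mac->bssinfo.short_preamble);
	if (changes & IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION)
		printk(KERN_DEBUG "%s: OFDM protection now %d\n",
		       dev->name, mac->bssinfo.use_protection);
	/* IEEE80211SOFTMAC_BSSINFOCHG_RATES: reread mac->bssinfo.supported_rates */
}

/* registered once at init time, e.g. after alloc_ieee80211softmac():
 *	mac->bssinfo_change = mydrv_bssinfo_change;
 */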
 struct ieee80211softmac_device {
        /* 802.11 structure for data stuff */
        struct ieee80211_device *ieee;
@@ -200,10 +215,16 @@ struct ieee80211softmac_device {
         * The driver just needs to read them.
         */
        struct ieee80211softmac_txrates txrates;
-       /* If the driver needs to do stuff on TX rate changes, assign this callback. */
+
+       /* If the driver needs to do stuff on TX rate changes, assign this
+        * callback. See IEEE80211SOFTMAC_TXRATECHG for change flags. */
        void (*txrates_change)(struct net_device *dev,
-                              u32 changes, /* see IEEE80211SOFTMAC_TXRATECHG flags */
-                              const struct ieee80211softmac_txrates *rates_before_change);
+                              u32 changes);
+
+       /* If the driver needs to do stuff when BSS properties change, assign
+        * this callback. See IEEE80211SOFTMAC_BSSINFOCHG for change flags. */
+       void (*bssinfo_change)(struct net_device *dev,
+                              u32 changes);
 
        /* private stuff follows */
        /* this lock protects this structure */
@@ -216,6 +237,7 @@ struct ieee80211softmac_device {
        
        struct ieee80211softmac_scaninfo *scaninfo;
        struct ieee80211softmac_assoc_info associnfo;
+       struct ieee80211softmac_bss_info bssinfo;
 
        struct list_head auth_queue;
        struct list_head events;
@@ -257,6 +279,14 @@ extern void ieee80211softmac_fragment_lost(struct net_device *dev,
  * Note that the rates need to be sorted. */
 extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates);
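A hedged usage sketch for the declaration above: the driver hands the softmac a sorted list of hardware-supported rates. IEEE80211_CCK_RATE_1MB is referenced elsewhere in this patch; the other rate macro names are assumed to come from the same ieee80211.h rate definitions, and mydrv_announce_rates() is made up.

static void mydrv_announce_rates(struct net_device *dev)
{
	/* must already be sorted in ascending order, as noted above */
	static u8 rates[] = {
		IEEE80211_CCK_RATE_1MB,
		IEEE80211_CCK_RATE_2MB,
		IEEE80211_CCK_RATE_5MB,
		IEEE80211_CCK_RATE_11MB,
	};

	ieee80211softmac_set_rates(dev, ARRAY_SIZE(rates), rates);
}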
 
+/* Finds the highest rate which is:
+ *  1. Present in ri (optionally a basic rate)
+ *  2. Supported by the device
+ *  3. Less than or equal to the user-defined rate
+ */
+extern u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
+       struct ieee80211softmac_ratesinfo *ri, int basic_only);
+
 /* Helper function which advises you the rate at which a frame should be
  * transmitted. */
 static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac,
@@ -279,6 +309,24 @@ static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device
                return txrates->mcast_rate;
 }
 
+/* Helper function which advises you when it is safe to transmit with short
+ * preamble.
+ * You should only call this function when transmitting at CCK rates. */
+static inline int ieee80211softmac_short_preamble_ok(struct ieee80211softmac_device *mac,
+                                                   int is_multicast,
+                                                   int is_mgt)
+{
+       return (is_multicast && is_mgt) ? 0 : mac->bssinfo.short_preamble;
+}
+
+/* Helper function which advises you whether protection (e.g. self-CTS) is
+ * needed. 1 = protection needed, 0 = no protection needed
+ * Only use this function when transmitting with OFDM modulation. */
+static inline int ieee80211softmac_protection_needed(struct ieee80211softmac_device *mac)
+{
+       return mac->bssinfo.use_protection;
+}
+
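A hypothetical TX-path sketch showing where the two helpers above are intended to be consulted when a driver fills its hardware descriptor; the descriptor layout and mydrv_* names are invented for illustration:

struct mydrv_txdesc_sketch {
	u8 short_preamble;	/* only meaningful at CCK rates */
	u8 self_cts;		/* protect OFDM frames from 802.11b stations */
};

static void mydrv_fill_txdesc(struct ieee80211softmac_device *mac,
			      struct mydrv_txdesc_sketch *desc,
			      int is_ofdm, int is_multicast, int is_mgt)
{
	if (is_ofdm) {
		/* protection_needed() is only valid for OFDM transmissions */
		desc->short_preamble = 0;
		desc->self_cts = ieee80211softmac_protection_needed(mac);
	} else {
		/* short_preamble_ok() is only valid for CCK transmissions */
		desc->self_cts = 0;
		desc->short_preamble =
			ieee80211softmac_short_preamble_ok(mac, is_multicast,
							   is_mgt);
	}
}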
 /* Start the SoftMAC. Call this after you initialized the device
  * and it is ready to run.
  */
index ed90a8a..098c668 100644 (file)
@@ -271,6 +271,27 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return 0;
 }
 
+/*
+ * Deal with the sequence counter wrapping correctly.
+ * Refer to time_after() for how jiffies wrapping is handled.
+ */
+static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o)
+{
+       u32 iv32_n, iv16_n;
+       u32 iv32_o, iv16_o;
+
+       iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3];
+       iv16_n = (pn_n[4] << 8) | pn_n[5];
+
+       iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3];
+       iv16_o = (pn_o[4] << 8) | pn_o[5];
+
+       if ((s32)iv32_n - (s32)iv32_o < 0 ||
+           (iv32_n == iv32_o && iv16_n <= iv16_o))
+               return 1;
+       return 0;
+}
+
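A worked example of the time_after()-style comparison above, purely illustrative: the old high word sits just below the 32-bit wrap point and the new one just past it, so a plain unsigned compare would wrongly flag a replay while the signed difference accepts the frame.

static void replay_wrap_example(void)
{
	u32 iv32_old = 0xfffffff0;	/* just before the counter wraps */
	u32 iv32_new = 0x00000005;	/* just after the wrap */

	/* (s32)0x00000005 - (s32)0xfffffff0 = 5 - (-16) = 21 >= 0,
	 * so the new value is treated as "later" despite being numerically
	 * smaller, exactly as time_after() treats wrapped jiffies. */
	WARN_ON((s32)iv32_new - (s32)iv32_old < 0);
}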
 static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct ieee80211_ccmp_data *key = priv;
@@ -323,7 +344,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pn[5] = pos[0];
        pos += 8;
 
-       if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
+       if (ccmp_replay_check(pn, key->rx_pn)) {
                if (net_ratelimit()) {
                        printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT
                               " previous PN %02x%02x%02x%02x%02x%02x "
index 34dba0b..f2df2f5 100644 (file)
@@ -52,8 +52,10 @@ struct ieee80211_tkip_data {
 
        int key_idx;
 
-       struct crypto_tfm *tfm_arc4;
-       struct crypto_tfm *tfm_michael;
+       struct crypto_tfm *tx_tfm_arc4;
+       struct crypto_tfm *tx_tfm_michael;
+       struct crypto_tfm *rx_tfm_arc4;
+       struct crypto_tfm *rx_tfm_michael;
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
@@ -85,15 +87,29 @@ static void *ieee80211_tkip_init(int key_idx)
 
        priv->key_idx = key_idx;
 
-       priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
-       if (priv->tfm_arc4 == NULL) {
+       priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
+       if (priv->tx_tfm_arc4 == NULL) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API arc4\n");
                goto fail;
        }
 
-       priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0);
-       if (priv->tfm_michael == NULL) {
+       priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
+       if (priv->tx_tfm_michael == NULL) {
+               printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
+                      "crypto API michael_mic\n");
+               goto fail;
+       }
+
+       priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
+       if (priv->rx_tfm_arc4 == NULL) {
+               printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
+                      "crypto API arc4\n");
+               goto fail;
+       }
+
+       priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
+       if (priv->rx_tfm_michael == NULL) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API michael_mic\n");
                goto fail;
@@ -103,10 +119,14 @@ static void *ieee80211_tkip_init(int key_idx)
 
       fail:
        if (priv) {
-               if (priv->tfm_michael)
-                       crypto_free_tfm(priv->tfm_michael);
-               if (priv->tfm_arc4)
-                       crypto_free_tfm(priv->tfm_arc4);
+               if (priv->tx_tfm_michael)
+                       crypto_free_tfm(priv->tx_tfm_michael);
+               if (priv->tx_tfm_arc4)
+                       crypto_free_tfm(priv->tx_tfm_arc4);
+               if (priv->rx_tfm_michael)
+                       crypto_free_tfm(priv->rx_tfm_michael);
+               if (priv->rx_tfm_arc4)
+                       crypto_free_tfm(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -116,10 +136,16 @@ static void *ieee80211_tkip_init(int key_idx)
 static void ieee80211_tkip_deinit(void *priv)
 {
        struct ieee80211_tkip_data *_priv = priv;
-       if (_priv && _priv->tfm_michael)
-               crypto_free_tfm(_priv->tfm_michael);
-       if (_priv && _priv->tfm_arc4)
-               crypto_free_tfm(_priv->tfm_arc4);
+       if (_priv) {
+               if (_priv->tx_tfm_michael)
+                       crypto_free_tfm(_priv->tx_tfm_michael);
+               if (_priv->tx_tfm_arc4)
+                       crypto_free_tfm(_priv->tx_tfm_arc4);
+               if (_priv->rx_tfm_michael)
+                       crypto_free_tfm(_priv->rx_tfm_michael);
+               if (_priv->rx_tfm_arc4)
+                       crypto_free_tfm(_priv->rx_tfm_arc4);
+       }
        kfree(priv);
 }
 
@@ -351,12 +377,25 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
+       crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
        sg.page = virt_to_page(pos);
        sg.offset = offset_in_page(pos);
        sg.length = len + 4;
-       crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
+       crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4);
+
+       return 0;
+}
 
+/*
+ * Deal with the sequence counter wrapping correctly.
+ * Refer to time_after() for how jiffies wrapping is handled.
+ */
+static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
+                                   u32 iv32_o, u16 iv16_o)
+{
+       if ((s32)iv32_n - (s32)iv32_o < 0 ||
+           (iv32_n == iv32_o && iv16_n <= iv16_o))
+               return 1;
        return 0;
 }
 
@@ -414,8 +453,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
        pos += 8;
 
-       if (iv32 < tkey->rx_iv32 ||
-           (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
+       if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
                if (net_ratelimit()) {
                        printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT
                               " previous TSC %08x%04x received TSC "
@@ -434,11 +472,11 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
        plen = skb->len - hdr_len - 12;
 
-       crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
+       crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
        sg.page = virt_to_page(pos);
        sg.offset = offset_in_page(pos);
        sg.length = plen + 4;
-       crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4);
+       crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4);
 
        crc = ~crc32_le(~0, pos, plen);
        icv[0] = crc;
@@ -472,12 +510,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return keyidx;
 }
 
-static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
+static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr,
                       u8 * data, size_t data_len, u8 * mic)
 {
        struct scatterlist sg[2];
 
-       if (tkey->tfm_michael == NULL) {
+       if (tfm_michael == NULL) {
                printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
                return -1;
        }
@@ -489,10 +527,10 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
        sg[1].offset = offset_in_page(data);
        sg[1].length = data_len;
 
-       crypto_digest_init(tkey->tfm_michael);
-       crypto_digest_setkey(tkey->tfm_michael, key, 8);
-       crypto_digest_update(tkey->tfm_michael, sg, 2);
-       crypto_digest_final(tkey->tfm_michael, mic);
+       crypto_digest_init(tfm_michael);
+       crypto_digest_setkey(tfm_michael, key, 8);
+       crypto_digest_update(tfm_michael, sg, 2);
+       crypto_digest_final(tfm_michael, mic);
 
        return 0;
 }
@@ -528,7 +566,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
        if (stype & IEEE80211_STYPE_QOS_DATA) {
                const struct ieee80211_hdr_3addrqos *qoshdr =
                        (struct ieee80211_hdr_3addrqos *)skb->data;
-               hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID;
+               hdr[12] = qoshdr->qos_ctl & cpu_to_le16(IEEE80211_QCTL_TID);
        } else
                hdr[12] = 0;            /* priority */
 
@@ -550,7 +588,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
 
        michael_mic_hdr(skb, tkey->tx_hdr);
        pos = skb_put(skb, 8);
-       if (michael_mic(tkey, &tkey->key[16], tkey->tx_hdr,
+       if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
                        skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
                return -1;
 
@@ -588,7 +626,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
                return -1;
 
        michael_mic_hdr(skb, tkey->rx_hdr);
-       if (michael_mic(tkey, &tkey->key[24], tkey->rx_hdr,
+       if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
                        skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
                return -1;
        if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
@@ -618,14 +656,18 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
 {
        struct ieee80211_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_tfm *tfm = tkey->tfm_michael;
-       struct crypto_tfm *tfm2 = tkey->tfm_arc4;
+       struct crypto_tfm *tfm = tkey->tx_tfm_michael;
+       struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_tfm *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
        tkey->key_idx = keyidx;
-       tkey->tfm_michael = tfm;
-       tkey->tfm_arc4 = tfm2;
+       tkey->tx_tfm_michael = tfm;
+       tkey->tx_tfm_arc4 = tfm2;
+       tkey->rx_tfm_michael = tfm3;
+       tkey->rx_tfm_arc4 = tfm4;
        if (len == TKIP_KEY_LEN) {
                memcpy(tkey->key, key, TKIP_KEY_LEN);
                tkey->key_set = 1;
index 0ebf235..b435b28 100644 (file)
@@ -32,7 +32,8 @@ struct prism2_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_tfm *tfm;
+       struct crypto_tfm *tx_tfm;
+       struct crypto_tfm *rx_tfm;
 };
 
 static void *prism2_wep_init(int keyidx)
@@ -44,13 +45,19 @@ static void *prism2_wep_init(int keyidx)
                goto fail;
        priv->key_idx = keyidx;
 
-       priv->tfm = crypto_alloc_tfm("arc4", 0);
-       if (priv->tfm == NULL) {
+       priv->tx_tfm = crypto_alloc_tfm("arc4", 0);
+       if (priv->tx_tfm == NULL) {
                printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
                       "crypto API arc4\n");
                goto fail;
        }
 
+       priv->rx_tfm = crypto_alloc_tfm("arc4", 0);
+       if (priv->rx_tfm == NULL) {
+               printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
+                      "crypto API arc4\n");
+               goto fail;
+       }
        /* start WEP IV from a random value */
        get_random_bytes(&priv->iv, 4);
 
@@ -58,8 +65,10 @@ static void *prism2_wep_init(int keyidx)
 
       fail:
        if (priv) {
-               if (priv->tfm)
-                       crypto_free_tfm(priv->tfm);
+               if (priv->tx_tfm)
+                       crypto_free_tfm(priv->tx_tfm);
+               if (priv->rx_tfm)
+                       crypto_free_tfm(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
@@ -68,8 +77,12 @@ static void *prism2_wep_init(int keyidx)
 static void prism2_wep_deinit(void *priv)
 {
        struct prism2_wep_data *_priv = priv;
-       if (_priv && _priv->tfm)
-               crypto_free_tfm(_priv->tfm);
+       if (_priv) {
+               if (_priv->tx_tfm)
+                       crypto_free_tfm(_priv->tx_tfm);
+               if (_priv->rx_tfm)
+                       crypto_free_tfm(_priv->rx_tfm);
+       }
        kfree(priv);
 }
 
@@ -151,11 +164,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_cipher_setkey(wep->tfm, key, klen);
+       crypto_cipher_setkey(wep->tx_tfm, key, klen);
        sg.page = virt_to_page(pos);
        sg.offset = offset_in_page(pos);
        sg.length = len + 4;
-       crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
+       crypto_cipher_encrypt(wep->tx_tfm, &sg, &sg, len + 4);
 
        return 0;
 }
@@ -194,11 +207,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        /* Apply RC4 to data and compute CRC32 over decrypted data */
        plen = skb->len - hdr_len - 8;
 
-       crypto_cipher_setkey(wep->tfm, key, klen);
+       crypto_cipher_setkey(wep->rx_tfm, key, klen);
        sg.page = virt_to_page(pos);
        sg.offset = offset_in_page(pos);
        sg.length = plen + 4;
-       crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
+       crypto_cipher_decrypt(wep->rx_tfm, &sg, &sg, plen + 4);
 
        crc = ~crc32_le(~0, pos, plen);
        icv[0] = crc;
index 72d4d4e..7707041 100644 (file)
@@ -779,33 +779,44 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
        return 0;
 }
 
-/* Filter out unrelated packets, call ieee80211_rx[_mgt] */
-int ieee80211_rx_any(struct ieee80211_device *ieee,
+/* Filter out unrelated packets, call ieee80211_rx[_mgt].
+ * This function takes ownership of the skb; the caller must not use it
+ * again after this call. */
+void ieee80211_rx_any(struct ieee80211_device *ieee,
                     struct sk_buff *skb, struct ieee80211_rx_stats *stats)
 {
        struct ieee80211_hdr_4addr *hdr;
        int is_packet_for_us;
        u16 fc;
 
-       if (ieee->iw_mode == IW_MODE_MONITOR)
-               return ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL;
+       if (ieee->iw_mode == IW_MODE_MONITOR) {
+               if (!ieee80211_rx(ieee, skb, stats))
+                       dev_kfree_skb_irq(skb);
+               return;
+       }
+
+       if (skb->len < sizeof(struct ieee80211_hdr))
+               goto drop_free;
 
        hdr = (struct ieee80211_hdr_4addr *)skb->data;
        fc = le16_to_cpu(hdr->frame_ctl);
 
        if ((fc & IEEE80211_FCTL_VERS) != 0)
-               return -EINVAL;
+               goto drop_free;
                
        switch (fc & IEEE80211_FCTL_FTYPE) {
        case IEEE80211_FTYPE_MGMT:
+               if (skb->len < sizeof(struct ieee80211_hdr_3addr))
+                       goto drop_free;
                ieee80211_rx_mgt(ieee, hdr, stats);
-               return 0;
+               dev_kfree_skb_irq(skb);
+               return;
        case IEEE80211_FTYPE_DATA:
                break;
        case IEEE80211_FTYPE_CTL:
-               return 0;
+               return;
        default:
-               return -EINVAL;
+               return;
        }
 
        is_packet_for_us = 0;
@@ -849,8 +860,14 @@ int ieee80211_rx_any(struct ieee80211_device *ieee,
        }
 
        if (is_packet_for_us)
-               return (ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL);
-       return 0;
+               if (!ieee80211_rx(ieee, skb, stats))
+                       dev_kfree_skb_irq(skb);
+       return;
+
+drop_free:
+       dev_kfree_skb_irq(skb);
+       ieee->stats.rx_dropped++;
+       return;
 }
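A hypothetical caller-side sketch of the ownership rule stated above: once the skb has been handed to ieee80211_rx_any(), it belongs to the ieee80211 layer and must not be touched or freed by the driver again (mydrv_rx_frame() is made up).

static void mydrv_rx_frame(struct ieee80211_device *ieee, struct sk_buff *skb)
{
	struct ieee80211_rx_stats stats = {
		.len = skb->len,	/* "make sure to set stats->len" */
	};

	ieee80211_rx_any(ieee, skb, &stats);
	/* no dev_kfree_skb()/reuse of skb here: it has been consumed */
}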
 
 #define MGMT_FRAME_FIXED_PART_LENGTH           0x24
@@ -1061,13 +1078,16 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
 
        while (length >= sizeof(*info_element)) {
                if (sizeof(*info_element) + info_element->len > length) {
-                       IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
-                                            "info_element->len + 2 > left : "
-                                            "info_element->len+2=%zd left=%d, id=%d.\n",
-                                            info_element->len +
-                                            sizeof(*info_element),
-                                            length, info_element->id);
-                       return 1;
+                       IEEE80211_ERROR("Info elem: parse failed: "
+                                       "info_element->len + 2 > left : "
+                                       "info_element->len+2=%zd left=%d, id=%d.\n",
+                                       info_element->len +
+                                       sizeof(*info_element),
+                                       length, info_element->id);
+                       /* We stop processing but don't return an error here
+                        * because some misbehaving APs break this rule,
+                        * e.g. the Orinoco AP1000. */
+                       break;
                }
 
                switch (info_element->id) {
@@ -1166,6 +1186,7 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
 
                case MFIE_TYPE_ERP_INFO:
                        network->erp_value = info_element->data[0];
+                       network->flags |= NETWORK_HAS_ERP_VALUE;
                        IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
                                             network->erp_value);
                        break;
@@ -1729,5 +1750,6 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
        }
 }
 
+EXPORT_SYMBOL_GPL(ieee80211_rx_any);
 EXPORT_SYMBOL(ieee80211_rx_mgt);
 EXPORT_SYMBOL(ieee80211_rx);
index bf04213..ae25449 100644 (file)
@@ -337,7 +337,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                hdr_len += 2;
 
                skb->priority = ieee80211_classify(skb);
-               header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
+               header.qos_ctl |= cpu_to_le16(skb->priority & IEEE80211_QCTL_TID);
        }
        header.frame_ctl = cpu_to_le16(fc);
 
@@ -532,13 +532,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
                        return 0;
                }
 
-               if (ret == NETDEV_TX_BUSY) {
-                       printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
-                              "driver should report queue full via "
-                              "ieee_device->is_queue_full.\n",
-                              ieee->dev->name);
-               }
-
                ieee80211_txb_free(txb);
        }
 
index 44215ce..589f6d2 100644 (file)
@@ -96,7 +96,7 @@ ieee80211softmac_disassoc(struct ieee80211softmac_device *mac)
        mac->associated = 0;
        mac->associnfo.bssvalid = 0;
        mac->associnfo.associating = 0;
-       ieee80211softmac_init_txrates(mac);
+       ieee80211softmac_init_bss(mac);
        ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
        spin_unlock_irqrestore(&mac->lock, flags);
 }
@@ -334,11 +334,19 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
        struct ieee80211_assoc_response * resp,
        struct ieee80211softmac_network *net)
 {
+       u16 cap = le16_to_cpu(resp->capability);
+       u8 erp_value = net->erp_value;
+
        mac->associnfo.associating = 0;
-       mac->associnfo.supported_rates = net->supported_rates;
+       mac->bssinfo.supported_rates = net->supported_rates;
        ieee80211softmac_recalc_txrates(mac);
 
        mac->associated = 1;
+
+       mac->associnfo.short_preamble_available =
+               (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0;
+       ieee80211softmac_process_erp(mac, erp_value);
+
        if (mac->set_bssid_filter)
                mac->set_bssid_filter(mac->dev, net->bssid);
        memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN);
@@ -351,9 +359,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
 int
 ieee80211softmac_handle_assoc_response(struct net_device * dev,
                                       struct ieee80211_assoc_response * resp,
-                                      struct ieee80211_network * _ieee80211_network_do_not_use)
+                                      struct ieee80211_network * _ieee80211_network)
 {
-       /* NOTE: the network parameter has to be ignored by
+       /* NOTE: the network parameter has to be mostly ignored by
         *       this code because it is the ieee80211's pointer
         *       this code because it is the ieee80211 layer's pointer
         */
@@ -385,6 +393,11 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
        /* now that we know it was for us, we can cancel the timeout */
        cancel_delayed_work(&mac->associnfo.timeout);
 
+       /* if the association response included an ERP IE, update our saved
+        * copy */
+       if (_ieee80211_network->flags & NETWORK_HAS_ERP_VALUE)
+               network->erp_value = _ieee80211_network->erp_value;
+
        switch (status) {
                case 0:
                        dprintk(KERN_INFO PFX "associated!\n");
index 6ae5a1d..82bfddb 100644 (file)
@@ -467,3 +467,17 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
        kfree(pkt);
        return 0;
 }
+
+/* Beacon handling */
+int ieee80211softmac_handle_beacon(struct net_device *dev,
+       struct ieee80211_beacon *beacon,
+       struct ieee80211_network *network)
+{
+       struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+
+       if (mac->associated && memcmp(network->bssid, mac->associnfo.bssid, ETH_ALEN) == 0)
+               ieee80211softmac_process_erp(mac, network->erp_value);
+
+       return 0;
+}
+
index 4b2e57d..addea1c 100644 (file)
@@ -44,6 +44,7 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
        softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response;
        softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req;
        softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
+       softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon;
        softmac->scaninfo = NULL;
 
        softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
@@ -178,21 +179,14 @@ int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo
        return 0;
 }
 
-/* Finds the highest rate which is:
- *  1. Present in ri (optionally a basic rate)
- *  2. Supported by the device
- *  3. Less than or equal to the user-defined rate
- */
-static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
+u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
        struct ieee80211softmac_ratesinfo *ri, int basic_only)
 {
        u8 user_rate = mac->txrates.user_rate;
        int i;
 
-       if (ri->count == 0) {
-               dprintk(KERN_ERR PFX "empty ratesinfo?\n");
+       if (ri->count == 0)
                return IEEE80211_CCK_RATE_1MB;
-       }
 
        for (i = ri->count - 1; i >= 0; i--) {
                u8 rate = ri->rates[i];
@@ -208,36 +202,61 @@ static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
        /* If we haven't found a suitable rate by now, just trust the user */
        return user_rate;
 }
+EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate);
+
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+       u8 erp_value)
+{
+       int use_protection;
+       int short_preamble;
+       u32 changes = 0;
+
+       /* Barker preamble mode */
+       short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0
+                         && mac->associnfo.short_preamble_available) ? 1 : 0;
+
+       /* Protection needed? */
+       use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
+
+       if (mac->bssinfo.short_preamble != short_preamble) {
+               changes |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+               mac->bssinfo.short_preamble = short_preamble;
+       }
+
+       if (mac->bssinfo.use_protection != use_protection) {
+               changes |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+               mac->bssinfo.use_protection = use_protection;
+       }
+
+       if (mac->bssinfo_change && changes)
+               mac->bssinfo_change(mac->dev, changes);
+}
 
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
 {
        struct ieee80211softmac_txrates *txrates = &mac->txrates;
-       struct ieee80211softmac_txrates oldrates;
        u32 change = 0;
 
-       if (mac->txrates_change)
-               oldrates = mac->txrates;
-
        change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
-       txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0);
+       txrates->default_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 0);
 
        change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
        txrates->default_fallback = lower_rate(mac, txrates->default_rate);
 
        change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
-       txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1);
+       txrates->mcast_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 1);
 
        if (mac->txrates_change)
-               mac->txrates_change(mac->dev, change, &oldrates);
+               mac->txrates_change(mac->dev, change);
 
 }
 
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac)
 {
        struct ieee80211_device *ieee = mac->ieee;
        u32 change = 0;
        struct ieee80211softmac_txrates *txrates = &mac->txrates;
-       struct ieee80211softmac_txrates oldrates;
+       struct ieee80211softmac_bss_info *bssinfo = &mac->bssinfo;
 
        /* TODO: We need some kind of state machine to lower the default rates
         *       if we lose too many packets.
@@ -245,8 +264,6 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
        /* Change the default txrate to the highest possible value.
         * The txrate machine will lower it, if it is too high.
         */
-       if (mac->txrates_change)
-               oldrates = mac->txrates;
        /* FIXME: We don't correctly handle backing down to lower
           rates, so 802.11g devices start off at 11M for now. People
           can manually change it if they really need to, but 11M is
@@ -272,7 +289,23 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
        change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;
 
        if (mac->txrates_change)
-               mac->txrates_change(mac->dev, change, &oldrates);
+               mac->txrates_change(mac->dev, change);
+
+       change = 0;
+
+       bssinfo->supported_rates.count = 0;
+       memset(bssinfo->supported_rates.rates, 0,
+               sizeof(bssinfo->supported_rates.rates));
+       change |= IEEE80211SOFTMAC_BSSINFOCHG_RATES;
+
+       bssinfo->short_preamble = 0;
+       change |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+
+       bssinfo->use_protection = 0;
+       change |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+
+       if (mac->bssinfo_change)
+               mac->bssinfo_change(mac->dev, change);
 
        mac->running = 1;
 }
@@ -282,7 +315,7 @@ void ieee80211softmac_start(struct net_device *dev)
        struct ieee80211softmac_device *mac = ieee80211_priv(dev);
 
        ieee80211softmac_start_check_rates(mac);
-       ieee80211softmac_init_txrates(mac);
+       ieee80211softmac_init_bss(mac);
 }
 EXPORT_SYMBOL_GPL(ieee80211softmac_start);
 
@@ -335,7 +368,6 @@ u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rat
 static void ieee80211softmac_add_txrates_badness(struct ieee80211softmac_device *mac,
                                                 int amount)
 {
-       struct ieee80211softmac_txrates oldrates;
        u8 default_rate = mac->txrates.default_rate;
        u8 default_fallback = mac->txrates.default_fallback;
        u32 changes = 0;
@@ -348,8 +380,6 @@ printk("badness %d\n", mac->txrate_badness);
        mac->txrate_badness += amount;
        if (mac->txrate_badness <= -1000) {
                /* Very small badness. Try a faster bitrate. */
-               if (mac->txrates_change)
-                       memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
                default_rate = raise_rate(mac, default_rate);
                changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
                default_fallback = get_fallback_rate(mac, default_rate);
@@ -358,8 +388,6 @@ printk("badness %d\n", mac->txrate_badness);
 printk("Bitrate raised to %u\n", default_rate);
        } else if (mac->txrate_badness >= 10000) {
                /* Very high badness. Try a slower bitrate. */
-               if (mac->txrates_change)
-                       memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
                default_rate = lower_rate(mac, default_rate);
                changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
                default_fallback = get_fallback_rate(mac, default_rate);
@@ -372,7 +400,7 @@ printk("Bitrate lowered to %u\n", default_rate);
        mac->txrates.default_fallback = default_fallback;
 
        if (changes && mac->txrates_change)
-               mac->txrates_change(mac->dev, changes, &oldrates);
+               mac->txrates_change(mac->dev, changes);
 }
 
 void ieee80211softmac_fragment_lost(struct net_device *dev,
@@ -416,7 +444,11 @@ ieee80211softmac_create_network(struct ieee80211softmac_device *mac,
        memcpy(&softnet->supported_rates.rates[softnet->supported_rates.count], net->rates_ex, net->rates_ex_len);
        softnet->supported_rates.count += net->rates_ex_len;
        sort(softnet->supported_rates.rates, softnet->supported_rates.count, sizeof(softnet->supported_rates.rates[0]), rate_cmp, NULL);
-       
+
+       /* We save the ERP value because it is needed at association time, and
+        * many APs do not include an ERP IE in the association response. */
+       softnet->erp_value = net->erp_value;
+
        softnet->capabilities = net->capability;
        return softnet;
 }
index fa1f8e3..0642e09 100644 (file)
@@ -116,9 +116,11 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
        struct ieee80211softmac_essid *essid);
 
 /* Rates related */
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+       u8 erp_value);
 int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
 u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac);
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac);
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
 static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
        return ieee80211softmac_lower_rate_delta(mac, rate, 1);
@@ -133,6 +135,9 @@ static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate)
 /*** prototypes from _io.c */
 int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
        void* ptrarg, u32 type, u32 arg);
+int ieee80211softmac_handle_beacon(struct net_device *dev,
+       struct ieee80211_beacon *beacon,
+       struct ieee80211_network *network);
 
 /*** prototypes from _auth.c */
 /* do these have to go into the public header? */
@@ -189,6 +194,7 @@ struct ieee80211softmac_network {
            authenticated:1,
            auth_desynced_once:1;
 
+       u8 erp_value;                           /* Saved ERP value */
        u16 capabilities;                       /* Capabilities bitfield */
        u8 challenge_len;                       /* Auth Challenge length */
        char *challenge;                        /* Challenge Text */