Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
authorDavid S. Miller <davem@davemloft.net>
Mon, 29 Nov 2010 19:19:09 +0000 (11:19 -0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 29 Nov 2010 19:19:09 +0000 (11:19 -0800)
85 files changed:
drivers/infiniband/core/addr.c
drivers/infiniband/hw/mlx4/main.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_main.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb4/cxgb4_main.c
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/netdev.c
drivers/net/ehea/ehea_main.c
drivers/net/ethoc.c
drivers/net/forcedeth.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/ixgb/ixgb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/phy/phy.c
drivers/net/pptp.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_ctx.c
drivers/net/qlcnic/qlcnic_hdr.h
drivers/net/qlcnic/qlcnic_init.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/sfc/filter.c
drivers/net/stmmac/stmmac.h
drivers/net/stmmac/stmmac_ethtool.c
drivers/net/stmmac/stmmac_main.c
drivers/net/stmmac/stmmac_mdio.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-main.c
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
include/linux/ipv6.h
include/linux/netdevice.h
include/linux/skbuff.h
include/linux/stmmac.h
include/net/if_inet6.h
include/net/rtnetlink.h
include/net/scm.h
include/net/sctp/command.h
include/net/sctp/constants.h
include/net/sctp/structs.h
include/net/x25.h
include/net/xfrm.h
net/8021q/vlan.c
net/Kconfig
net/core/dev.c
net/core/net-sysfs.c
net/core/net-sysfs.h
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/scm.c
net/ipv4/devinet.c
net/ipv4/ipconfig.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/sched/sch_generic.c
net/sched/sch_teql.c
net/x25/af_x25.c
net/x25/x25_link.c

index c15fd2e..8aba0ba 100644 (file)
@@ -130,8 +130,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
-               read_lock(&dev_base_lock);
-               for_each_netdev(&init_net, dev) {
+               rcu_read_lock();
+               for_each_netdev_rcu(&init_net, dev) {
                        if (ipv6_chk_addr(&init_net,
                                          &((struct sockaddr_in6 *) addr)->sin6_addr,
                                          dev, 1)) {
@@ -139,7 +139,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
                                break;
                        }
                }
-               read_unlock(&dev_base_lock);
+               rcu_read_unlock();
                break;
 #endif
        }
index bf3e20c..4e55a28 100644 (file)
@@ -848,8 +848,8 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
                goto out;
        }
 
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, tmp) {
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, tmp) {
                if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
                        gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
                        vid = rdma_vlan_dev_vlan_id(tmp);
@@ -884,7 +884,7 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
                        }
                }
        }
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 
        for (i = 0; i < 128; ++i)
                if (!hits[i]) {
index 4594a28..b61a1df 100644 (file)
 #define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
 #define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1               "Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE             OC_NAME "(be3)"
+#define OC_NAME_LANCER         OC_NAME "(Lancer)"
 #define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
+#define EMULEX_VENDOR_ID       0x10df
 #define BE_DEVICE_ID1          0x211
 #define BE_DEVICE_ID2          0x221
-#define OC_DEVICE_ID1          0x700
-#define OC_DEVICE_ID2          0x710
+#define OC_DEVICE_ID1          0x700   /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2          0x710   /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3          0xe220  /* Device id for Lancer cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
        case OC_DEVICE_ID1:
                return OC_NAME;
        case OC_DEVICE_ID2:
-               return OC_NAME1;
+               return OC_NAME_BE;
+       case OC_DEVICE_ID3:
+               return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
        default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */
+       u8  msix_vec_idx;
 
        struct napi_struct napi;
 };
@@ -260,6 +266,8 @@ struct be_adapter {
        u32 num_rx_qs;
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
+       u8 msix_vec_next_idx;
+
        struct vlan_group *vlan_grp;
        u16 vlans_added;
        u16 max_vlans;  /* Number of vlans supported */
@@ -299,8 +307,8 @@ struct be_adapter {
 
        bool sriov_enabled;
        struct be_vf_cfg vf_cfg[BE_MAX_VF];
-       u8 base_eq_id;
        u8 is_virtfn;
+       u32 sli_family;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +317,8 @@ struct be_adapter {
 #define BE_GEN2 2
 #define BE_GEN3 3
 
+#define lancer_chip(adapter)           (adapter->pdev->device == OC_DEVICE_ID3)
+
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define tx_stats(adapter)              (&adapter->tx_stats)
@@ -416,10 +426,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
 static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
 {
        u8 data;
-
-       pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
-       pci_read_config_byte(adapter->pdev, 0xFE, &data);
-       adapter->is_virtfn = (data != 0xAA);
+       u32 sli_intf;
+
+       if (lancer_chip(adapter)) {
+               pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+                                                               &sli_intf);
+               adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+       } else {
+               pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+               pci_read_config_byte(adapter->pdev, 0xFE, &data);
+               adapter->is_virtfn = (data != 0xAA);
+       }
 }
 
 static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
index 36eca1c..3865b2b 100644 (file)
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
 
 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 {
-       u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+       u32 sem;
+
+       if (lancer_chip(adapter))
+               sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+       else
+               sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 
        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -465,14 +470,25 @@ int be_cmd_fw_init(struct be_adapter *adapter)
        spin_lock(&adapter->mbox_lock);
 
        wrb = (u8 *)wrb_from_mbox(adapter);
-       *wrb++ = 0xFF;
-       *wrb++ = 0x12;
-       *wrb++ = 0x34;
-       *wrb++ = 0xFF;
-       *wrb++ = 0xFF;
-       *wrb++ = 0x56;
-       *wrb++ = 0x78;
-       *wrb = 0xFF;
+       if (lancer_chip(adapter)) {
+               *wrb++ = 0xFF;
+               *wrb++ = 0x34;
+               *wrb++ = 0x12;
+               *wrb++ = 0xFF;
+               *wrb++ = 0xFF;
+               *wrb++ = 0x78;
+               *wrb++ = 0x56;
+               *wrb = 0xFF;
+       } else {
+               *wrb++ = 0xFF;
+               *wrb++ = 0x12;
+               *wrb++ = 0x34;
+               *wrb++ = 0xFF;
+               *wrb++ = 0xFF;
+               *wrb++ = 0x56;
+               *wrb++ = 0x78;
+               *wrb = 0xFF;
+       }
 
        status = be_mbox_notify_wait(adapter);
 
@@ -680,16 +696,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+       if (lancer_chip(adapter)) {
+               req->hdr.version = 1;
+               req->page_size = 1; /* 1 for 4K */
+               AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+                                                               coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+                                                               no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+                                               __ilog2_u32(cq->len/256));
+               AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+                                                               ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+                                                               ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+       } else {
+               AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+                                                               coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+                                                               ctxt, no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+                                               __ilog2_u32(cq->len/256));
+               AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+                                                               ctxt, sol_evts);
+               AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+       }
 
-       AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
-       AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
-       AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
-                       __ilog2_u32(cq->len/256));
-       AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
-       AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
-       AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
-       AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-       AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +773,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+       if (lancer_chip(adapter)) {
+               req->hdr.version = 1;
+               req->cq_id = cpu_to_le16(cq->id);
+
+               AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+                                               be_encoded_q_len(mccq->len));
+               AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+                                                               ctxt, cq->id);
+               AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+                                                                ctxt, 1);
+
+       } else {
+               AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+                                               be_encoded_q_len(mccq->len));
+               AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+       }
 
-       AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
-       AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
-               be_encoded_q_len(mccq->len));
-       AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-       req->async_event_bitmap[0] |= 0x00000022;
+       req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
index 8469ff0..83d15c8 100644 (file)
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
 /******************** Create CQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
        u8 cidx[11];            /* dword 0*/
        u8 rsvd0;               /* dword 0*/
        u8 coalescwm[2];        /* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
        u8 rsvd5[32];           /* dword 3*/
 } __packed;
 
+struct amap_cq_context_lancer {
+       u8 rsvd0[12];           /* dword 0*/
+       u8 coalescwm[2];        /* dword 0*/
+       u8 nodelay;             /* dword 0*/
+       u8 rsvd1[12];           /* dword 0*/
+       u8 count[2];            /* dword 0*/
+       u8 valid;               /* dword 0*/
+       u8 rsvd2;               /* dword 0*/
+       u8 eventable;           /* dword 0*/
+       u8 eqid[16];            /* dword 1*/
+       u8 rsvd3[15];           /* dword 1*/
+       u8 armed;               /* dword 1*/
+       u8 rsvd4[32];           /* dword 2*/
+       u8 rsvd5[32];           /* dword 3*/
+} __packed;
+
 struct be_cmd_req_cq_create {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
-       u16 rsvd0;
-       u8 context[sizeof(struct amap_cq_context) / 8];
+       u8 page_size;
+       u8 rsvd0;
+       u8 context[sizeof(struct amap_cq_context_be) / 8];
        struct phys_addr pages[8];
 } __packed;
 
+
 struct be_cmd_resp_cq_create {
        struct be_cmd_resp_hdr hdr;
        u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
 /******************** Create MCCQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
        u8 con_index[14];
        u8 rsvd0[2];
        u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
        u8 rsvd2[32];
 } __packed;
 
+struct amap_mcc_context_lancer {
+       u8 async_cq_id[16];
+       u8 ring_size[4];
+       u8 rsvd0[12];
+       u8 rsvd1[31];
+       u8 valid;
+       u8 async_cq_valid[1];
+       u8 rsvd2[31];
+       u8 rsvd3[32];
+} __packed;
+
 struct be_cmd_req_mcc_create {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
-       u16 rsvd0;
+       u16 cq_id;
        u32 async_event_bitmap[1];
-       u8 context[sizeof(struct amap_mcc_context) / 8];
+       u8 context[sizeof(struct amap_mcc_context_be) / 8];
        struct phys_addr pages[8];
 } __packed;
 
@@ -605,6 +634,7 @@ struct be_hw_stats {
        struct be_rxf_stats rxf;
        u32 rsvd[48];
        struct be_erx_stats erx;
+       u32 rsvd1[6];
 };
 
 struct be_cmd_req_get_stats {
index a2ec5df..4096d97 100644 (file)
 #define MPU_EP_CONTROL                 0
 
 /********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET        0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK   0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK     0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT    31
+#define MPU_EP_SEMAPHORE_OFFSET                0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET       0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK           0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK             0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT            31
+
 /* MPU semphore POST stage values */
 #define POST_STAGE_AWAITING_HOST_RDY   0x1 /* FW awaiting goahead from host */
 #define POST_STAGE_HOST_RDY            0x2 /* Host has given go-ahed to FW */
 #define PCICFG_UE_STATUS_LOW_MASK              0xA8
 #define PCICFG_UE_STATUS_HI_MASK               0xAC
 
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET                    0x58
+#define SLI_INTF_VALID_MASK                    0xE0000000
+#define SLI_INTF_VALID                         0xC0000000
+#define SLI_INTF_HINT2_MASK                    0x1F000000
+#define SLI_INTF_HINT2_SHIFT                   24
+#define SLI_INTF_HINT1_MASK                    0x00FF0000
+#define SLI_INTF_HINT1_SHIFT                   16
+#define SLI_INTF_FAMILY_MASK                   0x00000F00
+#define SLI_INTF_FAMILY_SHIFT                  8
+#define SLI_INTF_IF_TYPE_MASK                  0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT                 12
+#define SLI_INTF_REV_MASK                      0x000000F0
+#define SLI_INTF_REV_SHIFT                     4
+#define SLI_INTF_FT_MASK                       0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY          0x0
+#define LANCER_A0_SLI_FAMILY   0xA
+
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET                        0xC18
 #define CEV_ISR_SIZE                           4
@@ -73,6 +97,9 @@
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET                   DB_CQ_OFFSET
 #define DB_EQ_RING_ID_MASK             0x1FF   /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK         0x3e00  /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT   (2) /* qid bits 9-13 placing at 11-15 */
+
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT                        (9)     /* bit 9 */
 /* Must be 1 */
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET                   0x120
 #define DB_CQ_RING_ID_MASK             0x3FF   /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK         0x7C00  /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT   (1)     /* qid bits 10-14
+                                                placing at 11-15 */
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT         (16)    /* bits 16 - 28 */
 /* Rearm bit */
index 93354ee..102567e 100644 (file)
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+       { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 {
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
+       val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+                       DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
        if (adapter->eeh_err)
                return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 {
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
+       val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+                       DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
        if (adapter->eeh_err)
                return;
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
 }
 
 /* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+                                                               bool *dummy)
 {
        int cnt = (skb->len > skb->data_len);
 
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
 
        /* to account for hdr wrb */
        cnt++;
-       if (cnt & 1) {
+       if (lancer_chip(adapter) || !(cnt & 1)) {
+               *dummy = false;
+       } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
-       } else
-               *dummy = false;
+       }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
 }
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
-               if (skb_is_gso_v6(skb))
+               if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+               if (lancer_chip(adapter) && adapter->sli_family  ==
+                                                       LANCER_A0_SLI_FAMILY) {
+                       AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+                       if (is_tcp_pkt(skb))
+                               AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+                                                               tcpcs, hdr, 1);
+                       else if (is_udp_pkt(skb))
+                               AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+                                                               udpcs, hdr, 1);
+               }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;
 
-       wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+       wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
@@ -1035,7 +1052,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               vid = swab16(vid);
+               if (!lancer_chip(adapter))
+                       vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
@@ -1113,7 +1131,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               vid = swab16(vid);
+               if (!lancer_chip(adapter))
+                       vid = swab16(vid);
 
                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;
@@ -1381,7 +1400,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
-                       wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+                       wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+                       txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
 }
@@ -1476,7 +1496,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
-       adapter->base_eq_id = adapter->tx_eq.q.id;
+
+       adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
@@ -1568,6 +1590,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
                if (rc)
                        goto err;
 
+               rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1602,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;
-
                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1634,45 @@ err:
        return -1;
 }
 
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
 {
-       return eq_id - adapter->base_eq_id;
+       struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+       if (!eqe->evt)
+               return false;
+       else
+               return true;
 }
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
-       int isr, i;
+       int isr, i, tx = 0 , rx = 0;
 
-       isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
-               (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
-       if (!isr)
-               return IRQ_NONE;
+       if (lancer_chip(adapter)) {
+               if (event_peek(&adapter->tx_eq))
+                       tx = event_handle(adapter, &adapter->tx_eq);
+               for_all_rx_queues(adapter, rxo, i) {
+                       if (event_peek(&rxo->rx_eq))
+                               rx |= event_handle(adapter, &rxo->rx_eq);
+               }
 
-       if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
-               event_handle(adapter, &adapter->tx_eq);
+               if (!(tx || rx))
+                       return IRQ_NONE;
 
-       for_all_rx_queues(adapter, rxo, i) {
-               if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
-                       event_handle(adapter, &rxo->rx_eq);
+       } else {
+               isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+                       (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+               if (!isr)
+                       return IRQ_NONE;
+
+               if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+                       event_handle(adapter, &adapter->tx_eq);
+
+               for_all_rx_queues(adapter, rxo, i) {
+                       if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+                               event_handle(adapter, &rxo->rx_eq);
+               }
        }
 
        return IRQ_HANDLED;
@@ -1830,8 +1869,7 @@ static void be_worker(struct work_struct *work)
                        be_post_rx_frags(rxo);
                }
        }
-
-       if (!adapter->ue_detected)
+       if (!adapter->ue_detected && !lancer_chip(adapter))
                be_detect_dump_ue(adapter);
 
 reschedule:
@@ -1910,10 +1948,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
 #endif
 }
 
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+                                       struct be_eq_obj *eq_obj)
 {
-       return adapter->msix_entries[
-                       be_evt_bit_get(adapter, eq_id)].vector;
+       return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1962,14 @@ static int be_request_irq(struct be_adapter *adapter,
        int vec;
 
        sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
-       vec = be_msix_vec_get(adapter, eq_obj->q.id);
+       vec = be_msix_vec_get(adapter, eq_obj);
        return request_irq(vec, handler, 0, eq_obj->desc, context);
 }
 
 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
                        void *context)
 {
-       int vec = be_msix_vec_get(adapter, eq_obj->q.id);
+       int vec = be_msix_vec_get(adapter, eq_obj);
        free_irq(vec, context);
 }
 
@@ -2036,14 +2074,15 @@ static int be_close(struct net_device *netdev)
        netif_carrier_off(netdev);
        adapter->link_up = false;
 
-       be_intr_set(adapter, false);
+       if (!lancer_chip(adapter))
+               be_intr_set(adapter, false);
 
        if (adapter->msix_enabled) {
-               vec = be_msix_vec_get(adapter, tx_eq->q.id);
+               vec = be_msix_vec_get(adapter, tx_eq);
                synchronize_irq(vec);
 
                for_all_rx_queues(adapter, rxo, i) {
-                       vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+                       vec = be_msix_vec_get(adapter, &rxo->rx_eq);
                        synchronize_irq(vec);
                }
        } else {
@@ -2082,7 +2121,8 @@ static int be_open(struct net_device *netdev)
 
        be_irq_register(adapter);
 
-       be_intr_set(adapter, true);
+       if (!lancer_chip(adapter))
+               be_intr_set(adapter, true);
 
        /* The evt queues are created in unarmed state; arm them */
        for_all_rx_queues(adapter, rxo, i) {
@@ -2548,6 +2588,9 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
 
+       if (lancer_chip(adapter))
+               netdev->vlan_features |= NETIF_F_TSO6;
+
        netdev->flags |= IFF_MULTICAST;
 
        adapter->rx_csum = true;
@@ -2587,6 +2630,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
        u8 __iomem *addr;
        int pcicfg_reg, db_reg;
 
+       if (lancer_chip(adapter)) {
+               addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+                       pci_resource_len(adapter->pdev, 0));
+               if (addr == NULL)
+                       return -ENOMEM;
+               adapter->db = addr;
+               return 0;
+       }
+
        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
@@ -2783,6 +2835,44 @@ static int be_get_config(struct be_adapter *adapter)
        return 0;
 }
 
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       u32 sli_intf = 0, if_type;
+
+       switch (pdev->device) {
+       case BE_DEVICE_ID1:
+       case OC_DEVICE_ID1:
+               adapter->generation = BE_GEN2;
+               break;
+       case BE_DEVICE_ID2:
+       case OC_DEVICE_ID2:
+               adapter->generation = BE_GEN3;
+               break;
+       case OC_DEVICE_ID3:
+               pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+               if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+                                               SLI_INTF_IF_TYPE_SHIFT;
+
+               if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+                       if_type != 0x02) {
+                       dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+                       return -EINVAL;
+               }
+               if (num_vfs > 0) {
+                       dev_err(&pdev->dev, "VFs not supported\n");
+                       return -EINVAL;
+               }
+               adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+                                        SLI_INTF_FAMILY_SHIFT);
+               adapter->generation = BE_GEN3;
+               break;
+       default:
+               adapter->generation = 0;
+       }
+       return 0;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -2805,22 +2895,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
-
-       switch (pdev->device) {
-       case BE_DEVICE_ID1:
-       case OC_DEVICE_ID1:
-               adapter->generation = BE_GEN2;
-               break;
-       case BE_DEVICE_ID2:
-       case OC_DEVICE_ID2:
-               adapter->generation = BE_GEN3;
-               break;
-       default:
-               adapter->generation = 0;
-       }
-
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
+
+       status = be_dev_family_check(adapter);
+       if (!status)
+               goto free_netdev;
+
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -2895,7 +2976,7 @@ ctrl_clean:
        be_ctrl_cleanup(adapter);
 free_netdev:
        be_sriov_disable(adapter);
-       free_netdev(adapter->netdev);
+       free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
 rel_reg:
        pci_release_regions(pdev);
index 062600b..03209a3 100644 (file)
@@ -56,8 +56,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.0.18"
-#define DRV_MODULE_RELDATE     "Oct 7, 2010"
+#define DRV_MODULE_VERSION     "2.0.20"
+#define DRV_MODULE_RELDATE     "Nov 24, 2010"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.0.15.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
                int j;
 
                rxr->rx_buf_ring =
-                       vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+                       vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;
 
-               memset(rxr->rx_buf_ring, 0,
-                      SW_RXBD_RING_SIZE * bp->rx_max_ring);
-
                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
                }
 
                if (bp->rx_pg_ring_size) {
-                       rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+                       rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;
 
-                       memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
-                              bp->rx_max_pg_ring);
                }
 
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 
        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
-       REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
-              BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
-              BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
-              BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
-              BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
-       val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
-       udelay(5);
+       if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+           (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+               REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+                      BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+                      BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+                      BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+                      BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+               val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+               udelay(5);
+       } else {  /* 5709 */
+               val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+               val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+               REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+               val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+               for (i = 0; i < 100; i++) {
+                       msleep(1);
+                       val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+                       if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+                               break;
+               }
+       }
 
        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
 
-               pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
+               REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -7914,15 +7924,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                goto err_out_release;
        }
 
+       bnx2_set_power_state(bp, PCI_D0);
+
        /* Configure byte swap and enable write to the reg_window registers.
         * Rely on CPU to do target byte swapping on big endian systems
         * The chip's target access swapping will not swap all accesses
         */
-       pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
-                              BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
-                              BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
-
-       bnx2_set_power_state(bp, PCI_D0);
+       REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+                  BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+                  BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
 
        bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
 
index bf4c342..5488a2e 100644 (file)
@@ -461,6 +461,8 @@ struct l2_fhdr {
 #define BNX2_PCICFG_MAILBOX_QUEUE_ADDR                 0x00000090
 #define BNX2_PCICFG_MAILBOX_QUEUE_DATA                 0x00000094
 
+#define BNX2_PCICFG_DEVICE_CONTROL                     0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND               ((1L<<5)<<16)
 
 /*
  *  pci_reg definition
index 863e73a..342ab58 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-4"
-#define DRV_MODULE_RELDATE      "2010/11/01"
+#define DRV_MODULE_VERSION      "1.60.00-5"
+#define DRV_MODULE_RELDATE      "2010/11/24"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
index 94d5f59..e20b2d3 100644 (file)
@@ -1692,11 +1692,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
                }
        }
 
-       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
-       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+       if (skb_is_gso_v6(skb))
+               rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+       else if (skb_is_gso(skb))
+               rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 
        return rc;
 }
index d02ffbd..0301278 100644 (file)
@@ -1499,8 +1499,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
         * updates that have been performed while interrupts were
         * disabled.
         */
-       if (bp->common.int_block == INT_BLOCK_IGU)
+       if (bp->common.int_block == INT_BLOCK_IGU) {
+               /* Disable local BHes to prevent a dead-lock situation between
+                * sch_direct_xmit() and bnx2x_run_loopback() (calling
+                * bnx2x_tx_int()), as both are taking netif_tx_lock().
+                */
+               local_bh_disable();
                bnx2x_tx_int(fp_tx);
+               local_bh_enable();
+       }
 
        rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
        if (rx_idx != rx_start_idx + num_pkts)
index 92057d7..f53edfd 100644 (file)
@@ -9096,12 +9096,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
 
-       rc = register_netdev(dev);
-       if (rc) {
-               dev_err(&pdev->dev, "Cannot register net device\n");
-               goto init_one_exit;
-       }
-
        /* Configure interupt mode: try to enable MSI-X/MSI if
         * needed, set bp->num_queues appropriately.
         */
@@ -9110,6 +9104,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        /* Add all NAPI objects */
        bnx2x_add_all_napi(bp);
 
+       rc = register_netdev(dev);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot register net device\n");
+               goto init_one_exit;
+       }
+
        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 
        netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
index 046d846..3864617 100644 (file)
@@ -3006,12 +3006,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
 {
        struct adapter *adapter = pci_get_drvdata(pdev);
-       int ret;
 
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       ret = t3_adapter_error(adapter, 0, 0);
+       t3_adapter_error(adapter, 0, 0);
 
        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
index bcf0753..ef02aa6 100644 (file)
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
  */
 void *cxgb_alloc_mem(unsigned long size)
 {
-       void *p = kmalloc(size, GFP_KERNEL);
+       void *p = kzalloc(size, GFP_KERNEL);
 
        if (!p)
-               p = vmalloc(size);
-       if (p)
-               memset(p, 0, size);
+               p = vzalloc(size);
        return p;
 }
 
index f50bc98..848f89d 100644 (file)
@@ -868,12 +868,10 @@ out:      release_firmware(fw);
  */
 void *t4_alloc_mem(size_t size)
 {
-       void *p = kmalloc(size, GFP_KERNEL);
+       void *p = kzalloc(size, GFP_KERNEL);
 
        if (!p)
-               p = vmalloc(size);
-       if (p)
-               memset(p, 0, size);
+               p = vzalloc(size);
        return p;
 }
 
index 4686c39..dcb7f82 100644 (file)
@@ -1425,13 +1425,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
        int size;
 
        size = sizeof(struct e1000_buffer) * txdr->count;
-       txdr->buffer_info = vmalloc(size);
+       txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Tx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
-       memset(txdr->buffer_info, 0, size);
 
        /* round up to nearest 4K */
 
@@ -1621,13 +1620,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
        int size, desc_len;
 
        size = sizeof(struct e1000_buffer) * rxdr->count;
-       rxdr->buffer_info = vmalloc(size);
+       rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Rx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
-       memset(rxdr->buffer_info, 0, size);
 
        desc_len = sizeof(struct e1000_rx_desc);
 
index 9b3f0a9..0adcb79 100644 (file)
@@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
        int err = -ENOMEM, size;
 
        size = sizeof(struct e1000_buffer) * tx_ring->count;
-       tx_ring->buffer_info = vmalloc(size);
+       tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info)
                goto err;
-       memset(tx_ring->buffer_info, 0, size);
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
        int i, size, desc_len, err = -ENOMEM;
 
        size = sizeof(struct e1000_buffer) * rx_ring->count;
-       rx_ring->buffer_info = vmalloc(size);
+       rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info)
                goto err;
-       memset(rx_ring->buffer_info, 0, size);
 
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
index 182b2a7..a84c389 100644 (file)
@@ -1496,12 +1496,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
 {
        int arr_size = sizeof(void *) * max_q_entries;
 
-       q_skba->arr = vmalloc(arr_size);
+       q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;
 
-       memset(q_skba->arr, 0, arr_size);
-
        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;
index c5a2fe0..b79d7e1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <net/ethoc.h>
 
 static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
  * @netdev:    pointer to network device structure
  * @napi:      NAPI structure
  * @msg_enable:        device state flags
- * @rx_lock:   receive lock
  * @lock:      device lock
  * @phy:       attached PHY
  * @mdio:      MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
        struct napi_struct napi;
        u32 msg_enable;
 
-       spinlock_t rx_lock;
        spinlock_t lock;
 
        struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
                unsigned int entry;
                struct ethoc_bd bd;
 
-               entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+               entry = priv->num_tx + priv->cur_rx;
                ethoc_read_bd(priv, entry, &bd);
-               if (bd.stat & RX_BD_EMPTY)
-                       break;
+               if (bd.stat & RX_BD_EMPTY) {
+                       ethoc_ack_irq(priv, INT_MASK_RX);
+                       /* If packet (interrupt) came in between checking
+                        * BD_EMTPY and clearing the interrupt source, then we
+                        * risk missing the packet as the RX interrupt won't
+                        * trigger right away when we reenable it; hence, check
+                        * BD_EMTPY here again to make sure there isn't such a
+                        * packet waiting for us...
+                        */
+                       ethoc_read_bd(priv, entry, &bd);
+                       if (bd.stat & RX_BD_EMPTY)
+                               break;
+               }
 
                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
                bd.stat &= ~RX_BD_STATS;
                bd.stat |=  RX_BD_EMPTY;
                ethoc_write_bd(priv, entry, &bd);
-               priv->cur_rx++;
+               if (++priv->cur_rx == priv->num_rx)
+                       priv->cur_rx = 0;
        }
 
        return count;
 }
 
-static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
 {
        struct net_device *netdev = dev->netdev;
 
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
        netdev->stats.collisions += (bd->stat >> 4) & 0xf;
        netdev->stats.tx_bytes += bd->stat >> 16;
        netdev->stats.tx_packets++;
-       return 0;
 }
 
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
 {
        struct ethoc *priv = netdev_priv(dev);
+       int count;
+       struct ethoc_bd bd;
 
-       spin_lock(&priv->lock);
+       for (count = 0; count < limit; ++count) {
+               unsigned int entry;
 
-       while (priv->dty_tx != priv->cur_tx) {
-               unsigned int entry = priv->dty_tx % priv->num_tx;
-               struct ethoc_bd bd;
+               entry = priv->dty_tx & (priv->num_tx-1);
 
                ethoc_read_bd(priv, entry, &bd);
-               if (bd.stat & TX_BD_READY)
-                       break;
 
-               entry = (++priv->dty_tx) % priv->num_tx;
-               (void)ethoc_update_tx_stats(priv, &bd);
+               if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+                       ethoc_ack_irq(priv, INT_MASK_TX);
+                       /* If interrupt came in between reading in the BD
+                        * and clearing the interrupt source, then we risk
+                        * missing the event as the TX interrupt won't trigger
+                        * right away when we reenable it; hence, check
+                        * BD_EMPTY here again to make sure there isn't such an
+                        * event pending...
+                        */
+                       ethoc_read_bd(priv, entry, &bd);
+                       if (bd.stat & TX_BD_READY ||
+                           (priv->dty_tx == priv->cur_tx))
+                               break;
+               }
+
+               ethoc_update_tx_stats(priv, &bd);
+               priv->dty_tx++;
        }
 
        if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
                netif_wake_queue(dev);
 
-       ethoc_ack_irq(priv, INT_MASK_TX);
-       spin_unlock(&priv->lock);
+       return count;
 }
 
 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct ethoc *priv = netdev_priv(dev);
        u32 pending;
-
-       ethoc_disable_irq(priv, INT_MASK_ALL);
+       u32 mask;
+
+       /* Figure out what triggered the interrupt...
+        * The tricky bit here is that the interrupt source bits get
+        * set in INT_SOURCE for an event irregardless of whether that
+        * event is masked or not.  Thus, in order to figure out what
+        * triggered the interrupt, we need to remove the sources
+        * for all events that are currently masked.  This behaviour
+        * is not particularly well documented but reasonable...
+        */
+       mask = ethoc_read(priv, INT_MASK);
        pending = ethoc_read(priv, INT_SOURCE);
+       pending &= mask;
+
        if (unlikely(pending == 0)) {
-               ethoc_enable_irq(priv, INT_MASK_ALL);
                return IRQ_NONE;
        }
 
        ethoc_ack_irq(priv, pending);
 
+       /* We always handle the dropped packet interrupt */
        if (pending & INT_MASK_BUSY) {
                dev_err(&dev->dev, "packet dropped\n");
                dev->stats.rx_dropped++;
        }
 
-       if (pending & INT_MASK_RX) {
-               if (napi_schedule_prep(&priv->napi))
-                       __napi_schedule(&priv->napi);
-       } else {
-               ethoc_enable_irq(priv, INT_MASK_RX);
+       /* Handle receive/transmit event by switching to polling */
+       if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+               ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+               napi_schedule(&priv->napi);
        }
 
-       if (pending & INT_MASK_TX)
-               ethoc_tx(dev);
-
-       ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
        return IRQ_HANDLED;
 }
 
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
 static int ethoc_poll(struct napi_struct *napi, int budget)
 {
        struct ethoc *priv = container_of(napi, struct ethoc, napi);
-       int work_done = 0;
+       int rx_work_done = 0;
+       int tx_work_done = 0;
+
+       rx_work_done = ethoc_rx(priv->netdev, budget);
+       tx_work_done = ethoc_tx(priv->netdev, budget);
 
-       work_done = ethoc_rx(priv->netdev, budget);
-       if (work_done < budget) {
-               ethoc_enable_irq(priv, INT_MASK_RX);
+       if (rx_work_done < budget && tx_work_done < budget) {
                napi_complete(napi);
+               ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        }
 
-       return work_done;
+       return rx_work_done;
 }
 
 static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
-       unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
        struct ethoc *priv = bus->priv;
+       int i;
 
        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
 
-       while (time_before(jiffies, timeout)) {
+       for (i=0; i < 5; i++) {
                u32 status = ethoc_read(priv, MIISTATUS);
                if (!(status & MIISTATUS_BUSY)) {
                        u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
                        ethoc_write(priv, MIICOMMAND, 0);
                        return data;
                }
-
-               schedule();
+               usleep_range(100,200);
        }
 
        return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 
 static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
-       unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
        struct ethoc *priv = bus->priv;
+       int i;
 
        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIITX_DATA, val);
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
 
-       while (time_before(jiffies, timeout)) {
+       for (i=0; i < 5; i++) {
                u32 stat = ethoc_read(priv, MIISTATUS);
                if (!(stat & MIISTATUS_BUSY)) {
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                        return 0;
                }
-
-               schedule();
+               usleep_range(100,200);
        }
 
        return -EBUSY;
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
        /* calculate the number of TX/RX buffers, maximum 128 supported */
        num_bd = min_t(unsigned int,
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
-       priv->num_tx = max(2, num_bd / 4);
+       if (num_bd < 4) {
+               ret = -ENODEV;
+               goto error;
+       }
+       /* num_tx must be a power of two */
+       priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
        priv->num_rx = num_bd - priv->num_tx;
 
+       dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+               priv->num_tx, priv->num_rx);
+
        priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
 
        /* Allow the platform setup code to pass in a MAC address. */
        if (pdev->dev.platform_data) {
-               struct ethoc_platform_data *pdata =
-                       (struct ethoc_platform_data *)pdev->dev.platform_data;
+               struct ethoc_platform_data *pdata = pdev->dev.platform_data;
                memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
                priv->phy_id = pdata->phy_id;
+       } else {
+               priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+               {
+               const uint8_t* mac;
+
+               mac = of_get_property(pdev->dev.of_node,
+                                     "local-mac-address",
+                                     NULL);
+               if (mac)
+                       memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+               }
+#endif
        }
 
        /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
        /* setup NAPI */
        netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
 
-       spin_lock_init(&priv->rx_lock);
        spin_lock_init(&priv->lock);
 
        ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
 # define ethoc_resume  NULL
 #endif
 
+#ifdef CONFIG_OF
+static struct of_device_id ethoc_match[] = {
+       {
+               .compatible = "opencores,ethoc",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+#endif
+
 static struct platform_driver ethoc_driver = {
        .probe   = ethoc_probe,
        .remove  = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
        .resume  = ethoc_resume,
        .driver  = {
                .name = "ethoc",
+               .owner = THIS_MODULE,
+#ifdef CONFIG_OF
+               .of_match_table = ethoc_match,
+#endif
        },
 };
 
index 0fa1776..2fd1ae9 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
+#include  <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 #if 0
@@ -186,9 +186,9 @@ enum {
        NvRegSlotTime = 0x9c,
 #define NVREG_SLOTTIME_LEGBF_ENABLED   0x80000000
 #define NVREG_SLOTTIME_10_100_FULL     0x00007f00
-#define NVREG_SLOTTIME_1000_FULL       0x0003ff00
+#define NVREG_SLOTTIME_1000_FULL       0x0003ff00
 #define NVREG_SLOTTIME_HALF            0x0000ff00
-#define NVREG_SLOTTIME_DEFAULT         0x00007f00
+#define NVREG_SLOTTIME_DEFAULT         0x00007f00
 #define NVREG_SLOTTIME_MASK            0x000000ff
 
        NvRegTxDeferral = 0xA0,
@@ -297,7 +297,7 @@ enum {
 #define NVREG_WAKEUPFLAGS_ENABLE       0x1111
 
        NvRegMgmtUnitGetVersion = 0x204,
-#define NVREG_MGMTUNITGETVERSION       0x01
+#define NVREG_MGMTUNITGETVERSION       0x01
        NvRegMgmtUnitVersion = 0x208,
 #define NVREG_MGMTUNITVERSION          0x08
        NvRegPowerCap = 0x268,
@@ -368,8 +368,8 @@ struct ring_desc_ex {
 };
 
 union ring_type {
-       struct ring_descorig;
-       struct ring_desc_exex;
+       struct ring_desc *orig;
+       struct ring_desc_ex *ex;
 };
 
 #define FLAG_MASK_V1 0xffff0000
@@ -444,10 +444,10 @@ union ring_type {
 #define NV_RX3_VLAN_TAG_MASK   (0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ_VER1              0x270
-#define NV_PCI_REGSZ_VER2              0x2d4
-#define NV_PCI_REGSZ_VER3              0x604
-#define NV_PCI_REGSZ_MAX               0x604
+#define NV_PCI_REGSZ_VER1      0x270
+#define NV_PCI_REGSZ_VER2      0x2d4
+#define NV_PCI_REGSZ_VER3      0x604
+#define NV_PCI_REGSZ_MAX       0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY    4
@@ -717,7 +717,7 @@ static const struct register_test nv_registers_test[] = {
        { NvRegMulticastAddrA, 0xffffffff },
        { NvRegTxWatermark, 0x0ff },
        { NvRegWakeUpFlags, 0x07777 },
-       { 0,0 }
+       { 0, 0 }
 };
 
 struct nv_skb_map {
@@ -911,7 +911,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
  * Power down phy when interface is down (persists through reboot;
  * older Linux and other OSes may not power it up again)
  */
-static int phy_power_down = 0;
+static int phy_power_down;
 
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
@@ -984,12 +984,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
        u8 __iomem *base = get_hwbase(dev);
 
        if (!nv_optimized(np)) {
-               if (rxtx_flags & NV_SETUP_RX_RING) {
+               if (rxtx_flags & NV_SETUP_RX_RING)
                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
-               }
-               if (rxtx_flags & NV_SETUP_TX_RING) {
+               if (rxtx_flags & NV_SETUP_TX_RING)
                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-               }
        } else {
                if (rxtx_flags & NV_SETUP_RX_RING) {
                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1013,8 @@ static void free_rings(struct net_device *dev)
                        pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                                            np->rx_ring.ex, np->ring_addr);
        }
-       if (np->rx_skb)
-               kfree(np->rx_skb);
-       if (np->tx_skb)
-               kfree(np->tx_skb);
+       kfree(np->rx_skb);
+       kfree(np->tx_skb);
 }
 
 static int using_multi_irqs(struct net_device *dev)
@@ -1174,16 +1170,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
        unsigned int tries = 0;
 
        miicontrol = BMCR_RESET | bmcr_setup;
-       if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+       if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
                return -1;
-       }
 
        /* wait for 500ms */
        msleep(500);
 
        /* must wait till reset is deasserted */
        while (miicontrol & BMCR_RESET) {
-               msleep(10);
+               usleep_range(10000, 20000);
                miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
                /* FIXME: 100 tries seem excessive */
                if (tries++ > 100)
@@ -1196,7 +1191,7 @@ static int phy_init(struct net_device *dev)
 {
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
-       u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+       u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
 
        /* phy errata for E3016 phy */
        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
@@ -1313,8 +1308,7 @@ static int phy_init(struct net_device *dev)
                        printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
                        return PHY_ERROR;
                }
-       }
-       else
+       } else
                np->gigabit = 0;
 
        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1340,7 +1334,7 @@ static int phy_init(struct net_device *dev)
        }
 
        /* phy vendor specific configuration */
-       if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
+       if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
                phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
                phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
                phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
@@ -1501,12 +1495,10 @@ static int phy_init(struct net_device *dev)
        /* restart auto negotiation, power down phy */
        mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
-       if (phy_power_down) {
+       if (phy_power_down)
                mii_control |= BMCR_PDOWN;
-       }
-       if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+       if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
                return PHY_ERROR;
-       }
 
        return 0;
 }
@@ -1526,8 +1518,8 @@ static void nv_start_rx(struct net_device *dev)
        }
        writel(np->linkspeed, base + NvRegLinkSpeed);
        pci_push(base);
-        rx_ctrl |= NVREG_RCVCTL_START;
-        if (np->mac_in_use)
+       rx_ctrl |= NVREG_RCVCTL_START;
+       if (np->mac_in_use)
                rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
        writel(rx_ctrl, base + NvRegReceiverControl);
        dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
@@ -1745,7 +1737,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 static int nv_alloc_rx(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
-       struct ring_descless_rx;
+       struct ring_desc *less_rx;
 
        less_rx = np->get_rx.orig;
        if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1759,8 @@ static int nv_alloc_rx(struct net_device *dev)
                                np->put_rx.orig = np->first_rx.orig;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else {
+               } else
                        return 1;
-               }
        }
        return 0;
 }
@@ -1777,7 +1768,7 @@ static int nv_alloc_rx(struct net_device *dev)
 static int nv_alloc_rx_optimized(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
-       struct ring_desc_exless_rx;
+       struct ring_desc_ex *less_rx;
 
        less_rx = np->get_rx.ex;
        if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1791,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                np->put_rx.ex = np->first_rx.ex;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else {
+               } else
                        return 1;
-               }
        }
        return 0;
 }
@@ -2018,24 +2008,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
 
 /* Known Good seed sets */
 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
-    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
-    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
-    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
-    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
-    {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
-    {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
-    {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
-    {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+       {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+       {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+       {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+       {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+       {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+       {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+       {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
+       {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
 
 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+       {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+       {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+       {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+       {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+       {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+       {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+       {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+       {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
 
 static void nv_gear_backoff_reseed(struct net_device *dev)
 {
@@ -2083,13 +2073,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
        temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
        temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
        temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
-       writel(temp,base + NvRegBackOffControl);
+       writel(temp, base + NvRegBackOffControl);
 
-       /* Setup seeds for all gear LFSRs. */
+       /* Setup seeds for all gear LFSRs. */
        get_random_bytes(&seedset, sizeof(seedset));
        seedset = seedset % BACKOFF_SEEDSET_ROWS;
-       for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
-       {
+       for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
                temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
                temp |= main_seedset[seedset][i-1] & 0x3ff;
                temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2102,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 size = skb_headlen(skb);
        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
        u32 empty_slots;
-       struct ring_descput_tx;
-       struct ring_descstart_tx;
-       struct ring_descprev_tx;
-       struct nv_skb_mapprev_tx_ctx;
+       struct ring_desc *put_tx;
+       struct ring_desc *start_tx;
+       struct ring_desc *prev_tx;
+       struct nv_skb_map *prev_tx_ctx;
        unsigned long flags;
 
        /* add fragments to entries count */
@@ -2208,10 +2197,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->name, entries, tx_flags_extra);
        {
                int j;
-               for (j=0; j<64; j++) {
+               for (j = 0; j < 64; j++) {
                        if ((j%16) == 0)
                                dprintk("\n%03x:", j);
-                       dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                       dprintk(" %02x", ((unsigned char *)skb->data)[j]);
                }
                dprintk("\n");
        }
@@ -2233,11 +2222,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
        u32 size = skb_headlen(skb);
        u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
        u32 empty_slots;
-       struct ring_desc_exput_tx;
-       struct ring_desc_exstart_tx;
-       struct ring_desc_exprev_tx;
-       struct nv_skb_mapprev_tx_ctx;
-       struct nv_skb_mapstart_tx_ctx;
+       struct ring_desc_ex *put_tx;
+       struct ring_desc_ex *start_tx;
+       struct ring_desc_ex *prev_tx;
+       struct nv_skb_map *prev_tx_ctx;
+       struct nv_skb_map *start_tx_ctx;
        unsigned long flags;
 
        /* add fragments to entries count */
@@ -2359,10 +2348,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                dev->name, entries, tx_flags_extra);
        {
                int j;
-               for (j=0; j<64; j++) {
+               for (j = 0; j < 64; j++) {
                        if ((j%16) == 0)
                                dprintk("\n%03x:", j);
-                       dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                       dprintk(" %02x", ((unsigned char *)skb->data)[j]);
                }
                dprintk("\n");
        }
@@ -2399,7 +2388,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
        struct fe_priv *np = netdev_priv(dev);
        u32 flags;
        int tx_work = 0;
-       struct ring_descorig_get_tx = np->get_tx.orig;
+       struct ring_desc *orig_get_tx = np->get_tx.orig;
 
        while ((np->get_tx.orig != np->put_tx.orig) &&
               !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2464,7 +2453,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
        struct fe_priv *np = netdev_priv(dev);
        u32 flags;
        int tx_work = 0;
-       struct ring_desc_exorig_get_tx = np->get_tx.ex;
+       struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
 
        while ((np->get_tx.ex != np->put_tx.ex) &&
               !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2491,9 +2480,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                        np->get_tx_ctx->skb = NULL;
                        tx_work++;
 
-                       if (np->tx_limit) {
+                       if (np->tx_limit)
                                nv_tx_flip_ownership(dev);
-                       }
                }
                if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
                        np->get_tx.ex = np->first_tx.ex;
@@ -2532,7 +2520,7 @@ static void nv_tx_timeout(struct net_device *dev)
                printk(KERN_INFO "%s: Ring at %lx\n",
                       dev->name, (unsigned long)np->ring_addr);
                printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-               for (i=0;i<=np->register_size;i+= 32) {
+               for (i = 0; i <= np->register_size; i += 32) {
                        printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                                        i,
                                        readl(base + i + 0), readl(base + i + 4),
@@ -2541,7 +2529,7 @@ static void nv_tx_timeout(struct net_device *dev)
                                        readl(base + i + 24), readl(base + i + 28));
                }
                printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
-               for (i=0;i<np->tx_ring_size;i+= 4) {
+               for (i = 0; i < np->tx_ring_size; i += 4) {
                        if (!nv_optimized(np)) {
                                printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
                                       i,
@@ -2616,11 +2604,11 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
        int protolen;   /* length as stored in the proto field */
 
        /* 1) calculate len according to header */
-       if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
-               protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
+       if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+               protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
                hdrlen = VLAN_HLEN;
        } else {
-               protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
+               protolen = ntohs(((struct ethhdr *)packet)->h_proto);
                hdrlen = ETH_HLEN;
        }
        dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
@@ -2667,7 +2655,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
        struct sk_buff *skb;
        int len;
 
-       while((np->get_rx.orig != np->put_rx.orig) &&
+       while ((np->get_rx.orig != np->put_rx.orig) &&
              !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
                (rx_work < limit)) {
 
@@ -2687,11 +2675,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
 
                {
                        int j;
-                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-                       for (j=0; j<64; j++) {
+                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
+                       for (j = 0; j < 64; j++) {
                                if ((j%16) == 0)
                                        dprintk("\n%03x:", j);
-                               dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                               dprintk(" %02x", ((unsigned char *)skb->data)[j]);
                        }
                        dprintk("\n");
                }
@@ -2710,9 +2698,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
                                        }
                                        /* framing errors are soft errors */
                                        else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
-                                               if (flags & NV_RX_SUBSTRACT1) {
+                                               if (flags & NV_RX_SUBSTRACT1)
                                                        len--;
-                                               }
                                        }
                                        /* the rest are hard errors */
                                        else {
@@ -2745,9 +2732,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
                                        }
                                        /* framing errors are soft errors */
                                        else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-                                               if (flags & NV_RX2_SUBSTRACT1) {
+                                               if (flags & NV_RX2_SUBSTRACT1)
                                                        len--;
-                                               }
                                        }
                                        /* the rest are hard errors */
                                        else {
@@ -2797,7 +2783,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
        struct sk_buff *skb;
        int len;
 
-       while((np->get_rx.ex != np->put_rx.ex) &&
+       while ((np->get_rx.ex != np->put_rx.ex) &&
              !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
              (rx_work < limit)) {
 
@@ -2817,11 +2803,11 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 
                {
                        int j;
-                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-                       for (j=0; j<64; j++) {
+                       dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
+                       for (j = 0; j < 64; j++) {
                                if ((j%16) == 0)
                                        dprintk("\n%03x:", j);
-                               dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+                               dprintk(" %02x", ((unsigned char *)skb->data)[j]);
                        }
                        dprintk("\n");
                }
@@ -2838,9 +2824,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                }
                                /* framing errors are soft errors */
                                else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-                                       if (flags & NV_RX2_SUBSTRACT1) {
+                                       if (flags & NV_RX2_SUBSTRACT1)
                                                len--;
-                                       }
                                }
                                /* the rest are hard errors */
                                else {
@@ -2949,7 +2934,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
                /* reinit nic view of the rx queue */
                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
                setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+               writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                        base + NvRegRingSizes);
                pci_push(base);
                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2971,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
 static int nv_set_mac_address(struct net_device *dev, void *addr)
 {
        struct fe_priv *np = netdev_priv(dev);
-       struct sockaddr *macaddr = (struct sockaddr*)addr;
+       struct sockaddr *macaddr = (struct sockaddr *)addr;
 
        if (!is_valid_ether_addr(macaddr->sa_data))
                return -EADDRNOTAVAIL;
@@ -3302,7 +3287,7 @@ set_speed:
        }
        writel(txreg, base + NvRegTxWatermark);
 
-       writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+       writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
                base + NvRegMisc1);
        pci_push(base);
        writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3297,8 @@ set_speed:
        /* setup pause frame */
        if (np->duplex != 0) {
                if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
-                       adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
-                       lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+                       adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+                       lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
 
                        switch (adv_pause) {
                        case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3309,17 @@ set_speed:
                                }
                                break;
                        case ADVERTISE_PAUSE_ASYM:
-                               if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
-                               {
+                               if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
                                        pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
-                               }
                                break;
-                       case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
-                               if (lpa_pause & LPA_PAUSE_CAP)
-                               {
+                       case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+                               if (lpa_pause & LPA_PAUSE_CAP) {
                                        pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
                                        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
                                                pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
                                }
                                if (lpa_pause == LPA_PAUSE_ASYM)
-                               {
                                        pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
-                               }
                                break;
                        }
                } else {
@@ -3514,7 +3494,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
-       for (i=0; ; i++) {
+       for (i = 0;; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
                writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
                dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
@@ -3553,7 +3533,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
        u8 __iomem *base = get_hwbase(dev);
        unsigned long flags;
        int retcode;
-       int rx_count, tx_work=0, rx_work=0;
+       int rx_count, tx_work = 0, rx_work = 0;
 
        do {
                if (!nv_optimized(np)) {
@@ -3628,7 +3608,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
-       for (i=0; ; i++) {
+       for (i = 0;; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
                writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
                dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
@@ -3675,7 +3655,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
-       for (i=0; ; i++) {
+       for (i = 0;; i++) {
                events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
                writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
                dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
@@ -3776,17 +3756,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
         * the remaining 8 interrupts.
         */
        for (i = 0; i < 8; i++) {
-               if ((irqmask >> i) & 0x1) {
+               if ((irqmask >> i) & 0x1)
                        msixmap |= vector << (i << 2);
-               }
        }
        writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
 
        msixmap = 0;
        for (i = 0; i < 8; i++) {
-               if ((irqmask >> (i + 8)) & 0x1) {
+               if ((irqmask >> (i + 8)) & 0x1)
                        msixmap |= vector << (i << 2);
-               }
        }
        writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
@@ -3809,10 +3787,10 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
        }
 
        if (np->msi_flags & NV_MSI_X_CAPABLE) {
-               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
                        np->msi_x_entry[i].entry = i;
-               }
-               if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+               ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
+               if (ret == 0) {
                        np->msi_flags |= NV_MSI_X_ENABLED;
                        if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
                                /* Request irq for rx handling */
@@ -3864,7 +3842,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                }
        }
        if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-               if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+               ret = pci_enable_msi(np->pci_dev);
+               if (ret == 0) {
                        np->msi_flags |= NV_MSI_ENABLED;
                        dev->irq = np->pci_dev->irq;
                        if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
@@ -3903,9 +3882,8 @@ static void nv_free_irq(struct net_device *dev)
        int i;
 
        if (np->msi_flags & NV_MSI_X_ENABLED) {
-               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+               for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
                        free_irq(np->msi_x_entry[i].vector, dev);
-               }
                pci_disable_msix(np->pci_dev);
                np->msi_flags &= ~NV_MSI_X_ENABLED;
        } else {
@@ -3975,7 +3953,7 @@ static void nv_do_nic_poll(unsigned long data)
                        /* reinit nic view of the rx queue */
                        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
                        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+                       writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                                base + NvRegRingSizes);
                        pci_push(base);
                        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +4083,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        }
 
        if (netif_carrier_ok(dev)) {
-               switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+               switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
                case NVREG_LINKSPEED_10:
                        ecmd->speed = SPEED_10;
                        break;
@@ -4344,7 +4322,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
        regs->version = FORCEDETH_REGS_VER;
        spin_lock_irq(&np->lock);
-       for (i = 0;i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i <= np->register_size/sizeof(u32); i++)
                rbuf[i] = readl(base + i*sizeof(u32));
        spin_unlock_irq(&np->lock);
 }
@@ -4464,10 +4442,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
                                pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
                                                    rxtx_ring, ring_addr);
                }
-               if (rx_skbuff)
-                       kfree(rx_skbuff);
-               if (tx_skbuff)
-                       kfree(tx_skbuff);
+
+               kfree(rx_skbuff);
+               kfree(tx_skbuff);
                goto exit;
        }
 
@@ -4491,14 +4468,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
        np->tx_ring_size = ring->tx_pending;
 
        if (!nv_optimized(np)) {
-               np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+               np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
                np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
        } else {
-               np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+               np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
                np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
        }
-       np->rx_skb = (struct nv_skb_map*)rx_skbuff;
-       np->tx_skb = (struct nv_skb_map*)tx_skbuff;
+       np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+       np->tx_skb = (struct nv_skb_map *)tx_skbuff;
        np->ring_addr = ring_addr;
 
        memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4492,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
                /* reinit nic view of the queues */
                writel(np->rx_buf_sz, base + NvRegOffloadConfig);
                setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-               writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+               writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                        base + NvRegRingSizes);
                pci_push(base);
                writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4841,7 +4818,7 @@ static int nv_loopback_test(struct net_device *dev)
        /* reinit nic view of the rx queue */
        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+       writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                base + NvRegRingSizes);
        pci_push(base);
 
@@ -4893,9 +4870,8 @@ static int nv_loopback_test(struct net_device *dev)
                if (flags & NV_RX_ERROR)
                        ret = 0;
        } else {
-               if (flags & NV_RX2_ERROR) {
+               if (flags & NV_RX2_ERROR)
                        ret = 0;
-               }
        }
 
        if (ret) {
@@ -4958,11 +4934,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
                        netif_addr_lock(dev);
                        spin_lock_irq(&np->lock);
                        nv_disable_hw_interrupts(dev, np->irqmask);
-                       if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+                       if (!(np->msi_flags & NV_MSI_X_ENABLED))
                                writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-                       } else {
+                       else
                                writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-                       }
                        /* stop engines */
                        nv_stop_rxtx(dev);
                        nv_txrx_reset(dev);
@@ -5003,7 +4978,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
                        /* reinit nic view of the rx queue */
                        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
                        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-                       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+                       writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                                base + NvRegRingSizes);
                        pci_push(base);
                        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +5081,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
                    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
                        np->mgmt_sema = 1;
                        return 1;
-               }
-               else
+               } else
                        udelay(50);
        }
 
@@ -5204,7 +5178,7 @@ static int nv_open(struct net_device *dev)
 
        /* give hw rings */
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-       writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+       writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                base + NvRegRingSizes);
 
        writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5251,8 +5225,7 @@ static int nv_open(struct net_device *dev)
                        writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
                else
                        writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
-       }
-       else
+       } else
                writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
        writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
        writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5236,7 @@ static int nv_open(struct net_device *dev)
                writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
 
        i = readl(base + NvRegPowerState);
-       if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+       if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
                writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
 
        pci_push(base);
@@ -5276,9 +5249,8 @@ static int nv_open(struct net_device *dev)
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        pci_push(base);
 
-       if (nv_request_irq(dev, 0)) {
+       if (nv_request_irq(dev, 0))
                goto out_drain;
-       }
 
        /* ask for interrupts */
        nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5466,7 +5438,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        addr = 0;
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
-                               pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
+                               pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
                                pci_resource_len(pci_dev, i),
                                pci_resource_flags(pci_dev, i));
                if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
@@ -5631,7 +5603,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                 */
                dev_printk(KERN_ERR, &pci_dev->dev,
                        "Invalid Mac address detected: %pM\n",
-                       dev->dev_addr);
+                       dev->dev_addr);
                dev_printk(KERN_ERR, &pci_dev->dev,
                        "Please complain to your hardware vendor. Switching to a random MAC.\n");
                random_ether_addr(dev->dev_addr);
@@ -5663,16 +5635,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                writel(powerstate, base + NvRegPowerState2);
        }
 
-       if (np->desc_ver == DESC_VER_1) {
+       if (np->desc_ver == DESC_VER_1)
                np->tx_flags = NV_TX_VALID;
-       } else {
+       else
                np->tx_flags = NV_TX2_VALID;
-       }
 
        np->msi_flags = 0;
-       if ((id->driver_data & DEV_HAS_MSI) && msi) {
+       if ((id->driver_data & DEV_HAS_MSI) && msi)
                np->msi_flags |= NV_MSI_CAPABLE;
-       }
+
        if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
                /* msix has had reported issues when modifying irqmask
                   as in the case of napi, therefore, disable for now
@@ -5735,9 +5706,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                    nv_mgmt_acquire_sema(dev) &&
                    nv_mgmt_get_version(dev)) {
                        np->mac_in_use = 1;
-                       if (np->mgmt_version > 0) {
+                       if (np->mgmt_version > 0)
                                np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
-                       }
                        dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
                                pci_name(pci_dev), np->mac_in_use);
                        /* management unit setup the phy already? */
@@ -5799,9 +5769,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        } else {
                /* see if it is a gigabit phy */
                u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-               if (mii_status & PHY_GIGABIT) {
+               if (mii_status & PHY_GIGABIT)
                        np->gigabit = PHY_GIGABIT;
-               }
        }
 
        /* set default link speed settings */
@@ -5829,19 +5798,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                   dev->dev_addr[5]);
 
        dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
-                  dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
-                  dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
-                       "csum " : "",
-                  dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
-                       "vlan " : "",
-                  id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
-                  id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
-                  id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
-                  np->gigabit == PHY_GIGABIT ? "gbit " : "",
-                  np->need_linktimer ? "lnktim " : "",
-                  np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
-                  np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
-                  np->desc_ver);
+               dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+               dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+                       "csum " : "",
+               dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+                       "vlan " : "",
+               id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
+               id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
+               id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
+               np->gigabit == PHY_GIGABIT ? "gbit " : "",
+               np->need_linktimer ? "lnktim " : "",
+               np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
+               np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
+               np->desc_ver);
 
        return 0;
 
@@ -5931,13 +5900,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
        int i;
 
        if (netif_running(dev)) {
-               // Gross.
+               /* Gross. */
                nv_close(dev);
        }
        netif_device_detach(dev);
 
        /* save non-pci configuration space */
-       for (i = 0;i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i <= np->register_size/sizeof(u32); i++)
                np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
        pci_save_state(pdev);
@@ -5960,7 +5929,7 @@ static int nv_resume(struct pci_dev *pdev)
        pci_enable_wake(pdev, PCI_D0, 0);
 
        /* restore non-pci configuration space */
-       for (i = 0;i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i <= np->register_size/sizeof(u32); i++)
                writel(np->saved_config_space[i], base+i*sizeof(u32));
 
        if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5959,8 @@ static void nv_shutdown(struct pci_dev *pdev)
         * If we really go for poweroff, we must not restore the MAC,
         * otherwise the MAC for WOL will be reversed at least on some boards.
         */
-       if (system_state != SYSTEM_POWER_OFF) {
+       if (system_state != SYSTEM_POWER_OFF)
                nv_restore_mac_addr(pdev);
-       }
 
        pci_disable_device(pdev);
        /*
index 892d196..67ea262 100644 (file)
@@ -2436,10 +2436,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
        int size;
 
        size = sizeof(struct igb_buffer) * tx_ring->count;
-       tx_ring->buffer_info = vmalloc(size);
+       tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info)
                goto err;
-       memset(tx_ring->buffer_info, 0, size);
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2586,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
        int size, desc_len;
 
        size = sizeof(struct igb_buffer) * rx_ring->count;
-       rx_ring->buffer_info = vmalloc(size);
+       rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info)
                goto err;
-       memset(rx_ring->buffer_info, 0, size);
 
        desc_len = sizeof(union e1000_adv_rx_desc);
 
index 4c998b7..8dbde23 100644 (file)
@@ -430,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
        int size;
 
        size = sizeof(struct igbvf_buffer) * tx_ring->count;
-       tx_ring->buffer_info = vmalloc(size);
+       tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info)
                goto err;
-       memset(tx_ring->buffer_info, 0, size);
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -470,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
        int size, desc_len;
 
        size = sizeof(struct igbvf_buffer) * rx_ring->count;
-       rx_ring->buffer_info = vmalloc(size);
+       rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info)
                goto err;
-       memset(rx_ring->buffer_info, 0, size);
 
        desc_len = sizeof(union e1000_adv_rx_desc);
 
index caa8192..211a169 100644 (file)
@@ -669,13 +669,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
        int size;
 
        size = sizeof(struct ixgb_buffer) * txdr->count;
-       txdr->buffer_info = vmalloc(size);
+       txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info) {
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate transmit descriptor ring memory\n");
                return -ENOMEM;
        }
-       memset(txdr->buffer_info, 0, size);
 
        /* round up to nearest 4K */
 
@@ -759,13 +758,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
        int size;
 
        size = sizeof(struct ixgb_buffer) * rxdr->count;
-       rxdr->buffer_info = vmalloc(size);
+       rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info) {
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate receive descriptor ring\n");
                return -ENOMEM;
        }
-       memset(rxdr->buffer_info, 0, size);
 
        /* Round up to nearest 4K */
 
index 0254195..494cb57 100644 (file)
@@ -5181,12 +5181,11 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
        int size;
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-       tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+       tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
        if (!tx_ring->tx_buffer_info)
-               tx_ring->tx_buffer_info = vmalloc(size);
+               tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
-       memset(tx_ring->tx_buffer_info, 0, size);
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -5246,12 +5245,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
+       rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
        if (!rx_ring->rx_buffer_info)
-               rx_ring->rx_buffer_info = vmalloc(size);
+               rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;
-       memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
index 5b8063c..2216a3c 100644 (file)
@@ -2489,10 +2489,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
        int size;
 
        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-       tx_ring->tx_buffer_info = vmalloc(size);
+       tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
-       memset(tx_ring->tx_buffer_info, 0, size);
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2556,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
        int size;
 
        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vmalloc(size);
+       rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info) {
                hw_dbg(&adapter->hw,
                       "Unable to vmalloc buffer memory for "
                       "the receive descriptor ring\n");
                goto alloc_failed;
        }
-       memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
index 95fe552..731077d 100644 (file)
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
        tx_ring->num_desc = adapter->num_txd;
        tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
-       cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+       cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
        if (cmd_buf_arr == NULL) {
                dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
                       netdev->name);
                goto err_out;
        }
-       memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
        tx_ring->cmd_buf_arr = cmd_buf_arr;
 
        recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
                        break;
 
                }
-               rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
-                       vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+               rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
                if (rds_ring->rx_buf_arr == NULL) {
                        printk(KERN_ERR "%s: Failed to allocate "
                                "rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
                        /* free whatever was already allocated */
                        goto err_out;
                }
-               memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
                INIT_LIST_HEAD(&rds_ring->free_list);
                /*
                 * Now go through all of them, set reference handles
index e1d30d7..ceeaac9 100644 (file)
@@ -1277,6 +1277,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i = 0, err;
        int pci_func_id = PCI_FUNC(pdev->devfn);
        uint8_t revision_id;
+       u32 val;
 
        if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
                pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1352,8 +1353,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        }
 
-       if (reset_devices) {
-               if (adapter->portnum == 0) {
+       if (adapter->portnum == 0) {
+               val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+               if (val != 0xffffffff && val != 0) {
                        NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
                        adapter->need_fw_reset = 1;
                }
index 472056b..afb7506 100644 (file)
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
        int desNo;
 
        size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
-       tx_ring->buffer_info = vmalloc(size);
+       tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info) {
                pr_err("Unable to allocate memory for the buffer infomation\n");
                return -ENOMEM;
        }
-       memset(tx_ring->buffer_info, 0, size);
 
        tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
        int desNo;
 
        size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
-       rx_ring->buffer_info = vmalloc(size);
+       rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info) {
                pr_err("Unable to allocate memory for the receive descriptor ring\n");
                return -ENOMEM;
        }
-       memset(rx_ring->buffer_info, 0, size);
        rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
index 7670aac..a8445c7 100644 (file)
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
        pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
                        phydev->link ? "Up" : "Down");
        if (phydev->link)
-               printk(" - %d/%s", phydev->speed,
+               printk(KERN_CONT " - %d/%s", phydev->speed,
                                DUPLEX_FULL == phydev->duplex ?
                                "Full" : "Half");
 
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 EXPORT_SYMBOL(phy_print_status);
 
index ccbc913..7556a92 100644 (file)
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
        int err = 0;
        pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
 
-       callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
-               GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+       callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
        if (!callid_sock) {
                pr_err("PPTP: cann't allocate memory\n");
                return -ENOMEM;
index 56f54ff..9513a83 100644 (file)
@@ -923,6 +923,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_MACSPOOF                        0x200
 #define QLCNIC_MAC_OVERRIDE_DISABLED   0x400
 #define QLCNIC_PROMISC_DISABLED                0x800
+#define QLCNIC_NEED_FLR                        0x1000
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 
index 3ad1f3e..29cbc2a 100644 (file)
@@ -480,8 +480,10 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
 {
        int err;
 
-       if (reset_devices)
+       if (adapter->flags & QLCNIC_NEED_FLR) {
                pci_reset_function(adapter->pdev);
+               adapter->flags &= ~QLCNIC_NEED_FLR;
+       }
 
        err = qlcnic_fw_cmd_create_rx_ctx(adapter);
        if (err)
index 4290b80..566e0e8 100644 (file)
@@ -722,7 +722,7 @@ enum {
 #define QLCNIC_DEV_NPAR_OPER           1 /* NPAR Operational */
 #define QLCNIC_DEV_NPAR_OPER_TIMEO     30 /* Operational time out */
 
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN)          ((VAL) &= (1 << (FN * 4)))
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN)          ((VAL) & (1 << (FN * 4)))
 #define QLC_DEV_SET_REF_CNT(VAL, FN)           ((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_CLR_REF_CNT(VAL, FN)           ((VAL) &= ~(1 << (FN * 4)))
 #define QLC_DEV_SET_RST_RDY(VAL, FN)           ((VAL) |= (1 << (FN * 4)))
index 0d180c6..c5ea2f4 100644 (file)
@@ -236,12 +236,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
        tx_ring->num_desc = adapter->num_txd;
        tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
-       cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+       cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
        if (cmd_buf_arr == NULL) {
                dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
                goto err_out;
        }
-       memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
        tx_ring->cmd_buf_arr = cmd_buf_arr;
 
        recv_ctx = &adapter->recv_ctx;
@@ -275,14 +274,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
                                rds_ring->dma_size + NET_IP_ALIGN;
                        break;
                }
-               rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
-                       vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+               rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
                if (rds_ring->rx_buf_arr == NULL) {
                        dev_err(&netdev->dev, "Failed to allocate "
                                "rx buffer ring %d\n", ring);
                        goto err_out;
                }
-               memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
                INIT_LIST_HEAD(&rds_ring->free_list);
                /*
                 * Now go through all of them, set reference handles
index a3dcd04..899df5a 100644 (file)
@@ -1485,6 +1485,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        uint8_t revision_id;
        uint8_t pci_using_dac;
        char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+       u32 val;
 
        err = pci_enable_device(pdev);
        if (err)
@@ -1546,6 +1547,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_out_iounmap;
 
+       val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+       if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
+               adapter->flags |= QLCNIC_NEED_FLR;
+
        err = adapter->nic_ops->start_firmware(adapter);
        if (err) {
                dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
index 52cb608..44500b5 100644 (file)
@@ -428,10 +428,9 @@ int efx_probe_filters(struct efx_nic *efx)
                                             GFP_KERNEL);
                if (!table->used_bitmap)
                        goto fail;
-               table->spec = vmalloc(table->size * sizeof(*table->spec));
+               table->spec = vzalloc(table->size * sizeof(*table->spec));
                if (!table->spec)
                        goto fail;
-               memset(table->spec, 0, table->size * sizeof(*table->spec));
        }
 
        return 0;
index 79bdc2e..5f06c47 100644 (file)
@@ -20,7 +20,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION     "Apr_2010"
+#define DRV_MODULE_VERSION     "Nov_2010"
 #include <linux/platform_device.h>
 #include <linux/stmmac.h>
 
@@ -37,7 +37,6 @@ struct stmmac_priv {
        unsigned int cur_tx;
        unsigned int dirty_tx;
        unsigned int dma_tx_size;
-       int tx_coe;
        int tx_coalesce;
 
        struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
        struct sk_buff_head rx_recycle;
 
        struct net_device *dev;
-       int is_gmac;
        dma_addr_t dma_rx_phy;
        unsigned int dma_rx_size;
        unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
        struct napi_struct napi;
 
        phy_interface_t phy_interface;
-       int pbl;
-       int bus_id;
        int phy_addr;
        int phy_mask;
        int (*phy_reset) (void *priv);
-       void (*fix_mac_speed) (void *priv, unsigned int speed);
-       void (*bus_setup)(void __iomem *ioaddr);
-       void *bsp_priv;
+       int rx_coe;
+       int no_csum_insertion;
 
        int phy_irq;
        struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
        unsigned int flow_ctrl;
        unsigned int pause;
        struct mii_bus *mii;
-       int mii_clk_csr;
 
        u32 msg_enable;
        spinlock_t lock;
        int wolopts;
        int wolenabled;
-       int shutdown;
 #ifdef CONFIG_STMMAC_TIMER
        struct stmmac_timer *tm;
 #endif
 #ifdef STMMAC_VLAN_TAG_USED
        struct vlan_group *vlgrp;
 #endif
-       int enh_desc;
-       int rx_coe;
-       int bugged_jumbo;
-       int no_csum_insertion;
+       struct plat_stmmacenet_data *plat;
 };
 
-#ifdef CONFIG_STM_DRIVERS
-#include <linux/stm/pad.h>
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
-       int ret = 0;
-       struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
-
-       /* Pad routing setup */
-       if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
-                       dev_name(&pdev->dev)))) {
-               printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
-               ret = -ENODEV;
-       }
-       return ret;
-}
-#else
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
-       return 0;
-}
-#endif
-
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
index 6d65482..f2695fd 100644 (file)
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (!priv->is_gmac)
+       if (!priv->plat->has_gmac)
                strcpy(info->driver, MAC100_ETHTOOL_NAME);
        else
                strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
 
        memset(reg_space, 0x0, REG_SPACE_SIZE);
 
-       if (!priv->is_gmac) {
+       if (!priv->plat->has_gmac) {
                /* MAC registers */
                for (i = 0; i < 12; i++)
                        reg_space[i] = readl(priv->ioaddr + (i * 4));
index 06bc603..730a6fd 100644 (file)
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
        return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
 }
 
+/* On some ST platforms, some HW system configuration registers have to be
+ * set according to the link speed negotiated.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+       struct phy_device *phydev = priv->phydev;
+
+       if (likely(priv->plat->fix_mac_speed))
+               priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+                                         phydev->speed);
+}
+
 /**
  * stmmac_adjust_link
  * @dev: net device structure
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
-                               if (likely(priv->is_gmac))
+                               if (likely(priv->plat->has_gmac))
                                        ctrl &= ~priv->hw->link.port;
-                               if (likely(priv->fix_mac_speed))
-                                       priv->fix_mac_speed(priv->bsp_priv,
-                                                           phydev->speed);
+                               stmmac_hw_fix_mac_speed(priv);
                                break;
                        case 100:
                        case 10:
-                               if (priv->is_gmac) {
+                               if (priv->plat->has_gmac) {
                                        ctrl |= priv->hw->link.port;
                                        if (phydev->speed == SPEED_100) {
                                                ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
-                               if (likely(priv->fix_mac_speed))
-                                       priv->fix_mac_speed(priv->bsp_priv,
-                                                           phydev->speed);
+                               stmmac_hw_fix_mac_speed(priv);
                                break;
                        default:
                                if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
                return 0;
        }
 
-       snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+       snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
        snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                 priv->phy_addr);
        pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
-       if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+       if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
                /* In case of GMAC, SF mode has to be enabled
                 * to perform the TX COE. This depends on:
                 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
        init_dma_desc_rings(dev);
 
        /* DMA initialization and SW reset */
-       if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+       if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
                                         priv->dma_tx_phy,
                                         priv->dma_rx_phy) < 0)) {
 
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
        /* Copy the MAC addr into the HW  */
        priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
        /* If required, perform hw setup of the bus. */
-       if (priv->bus_setup)
-               priv->bus_setup(priv->ioaddr);
+       if (priv->plat->bus_setup)
+               priv->plat->bus_setup(priv->ioaddr);
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
 
        priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
        if (priv->rx_coe)
                pr_info("stmmac: Rx Checksum Offload Engine supported\n");
-       if (priv->tx_coe)
+       if (priv->plat->tx_coe)
                pr_info("\tTX Checksum insertion supported\n");
 
-       priv->shutdown = 0;
-
        /* Initialise the MMC (if present) to disable all interrupts. */
        writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
        writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                return stmmac_sw_tso(priv, skb);
 
        if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
-               if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
+               if (unlikely((!priv->plat->tx_coe) ||
+                            (priv->no_csum_insertion)))
                        skb_checksum_help(skb);
                else
                        csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                                           DMA_FROM_DEVICE);
 
                        (p + entry)->des2 = priv->rx_skbuff_dma[entry];
-                       if (unlikely(priv->is_gmac)) {
+                       if (unlikely(priv->plat->has_gmac)) {
                                if (bfsize >= BUF_SIZE_8KiB)
                                        (p + entry)->des3 =
                                            (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
                return -EBUSY;
        }
 
-       if (priv->is_gmac)
+       if (priv->plat->has_gmac)
                max_mtu = JUMBO_LEN;
        else
                max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
         * needs to have the Tx COE disabled for oversized frames
         * (due to limited buffer sizes). In this case we disable
         * the TX csum insertionin the TDES and not use SF. */
-       if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+       if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
                priv->no_csum_insertion = 1;
        else
                priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                return IRQ_NONE;
        }
 
-       if (priv->is_gmac)
+       if (priv->plat->has_gmac)
                /* To handle GMAC own interrupts */
                priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
 
@@ -1536,7 +1543,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
        struct mac_device_info *device;
 
-       if (priv->is_gmac)
+       if (priv->plat->has_gmac)
                device = dwmac1000_setup(priv->ioaddr);
        else
                device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1551,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
        if (!device)
                return -ENOMEM;
 
-       if (priv->enh_desc) {
+       if (priv->plat->enh_desc) {
                device->desc = &enh_desc_ops;
                pr_info("\tEnhanced descriptor structure\n");
        } else
@@ -1598,7 +1605,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
                plat_dat->bus_id);
 
        /* Check that this phy is for the MAC being initialised */
-       if (priv->bus_id != plat_dat->bus_id)
+       if (priv->plat->bus_id != plat_dat->bus_id)
                return 0;
 
        /* OK, this PHY is connected to the MAC.
@@ -1634,7 +1641,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *addr = NULL;
        struct net_device *ndev = NULL;
-       struct stmmac_priv *priv;
+       struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat;
 
        pr_info("STMMAC driver:\n\tplatform registration... ");
@@ -1683,13 +1690,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
        priv->device = &(pdev->dev);
        priv->dev = ndev;
        plat_dat = pdev->dev.platform_data;
-       priv->bus_id = plat_dat->bus_id;
-       priv->pbl = plat_dat->pbl;      /* TLI */
-       priv->mii_clk_csr = plat_dat->clk_csr;
-       priv->tx_coe = plat_dat->tx_coe;
-       priv->bugged_jumbo = plat_dat->bugged_jumbo;
-       priv->is_gmac = plat_dat->has_gmac;     /* GMAC is on board */
-       priv->enh_desc = plat_dat->enh_desc;
+
+       priv->plat = plat_dat;
+
        priv->ioaddr = addr;
 
        /* PMT module is not integrated in all the MAC devices. */
@@ -1703,10 +1706,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
        /* Set the I/O base addr */
        ndev->base_addr = (unsigned long)addr;
 
-       /* Verify embedded resource for the platform */
-       ret = stmmac_claim_resource(pdev);
-       if (ret < 0)
-               goto out;
+       /* Custom initialisation */
+       if (priv->plat->init) {
+               ret = priv->plat->init(pdev);
+               if (unlikely(ret))
+                       goto out;
+       }
 
        /* MAC HW revice detection */
        ret = stmmac_mac_device_setup(ndev);
@@ -1727,16 +1732,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
                goto out;
        }
 
-       priv->fix_mac_speed = plat_dat->fix_mac_speed;
-       priv->bus_setup = plat_dat->bus_setup;
-       priv->bsp_priv = plat_dat->bsp_priv;
-
        pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
               "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
               pdev->id, ndev->irq, addr);
 
        /* MDIO bus Registration */
-       pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+       pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
        ret = stmmac_mdio_register(ndev);
        if (ret < 0)
                goto out;
@@ -1744,6 +1745,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 
 out:
        if (ret < 0) {
+               if (priv->plat->exit)
+                       priv->plat->exit(pdev);
+
                platform_set_drvdata(pdev, NULL);
                release_mem_region(res->start, resource_size(res));
                if (addr != NULL)
@@ -1777,6 +1781,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 
        stmmac_mdio_unregister(ndev);
 
+       if (priv->plat->exit)
+               priv->plat->exit(pdev);
+
        platform_set_drvdata(pdev, NULL);
        unregister_netdev(ndev);
 
@@ -1790,69 +1797,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+static int stmmac_suspend(struct device *dev)
 {
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct stmmac_priv *priv = netdev_priv(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
        int dis_ic = 0;
 
-       if (!dev || !netif_running(dev))
+       if (!ndev || !netif_running(ndev))
                return 0;
 
        spin_lock(&priv->lock);
 
-       if (state.event == PM_EVENT_SUSPEND) {
-               netif_device_detach(dev);
-               netif_stop_queue(dev);
-               if (priv->phydev)
-                       phy_stop(priv->phydev);
+       netif_device_detach(ndev);
+       netif_stop_queue(ndev);
+       if (priv->phydev)
+               phy_stop(priv->phydev);
 
 #ifdef CONFIG_STMMAC_TIMER
-               priv->tm->timer_stop();
-               if (likely(priv->tm->enable))
-                       dis_ic = 1;
+       priv->tm->timer_stop();
+       if (likely(priv->tm->enable))
+               dis_ic = 1;
 #endif
-               napi_disable(&priv->napi);
-
-               /* Stop TX/RX DMA */
-               priv->hw->dma->stop_tx(priv->ioaddr);
-               priv->hw->dma->stop_rx(priv->ioaddr);
-               /* Clear the Rx/Tx descriptors */
-               priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
-                                            dis_ic);
-               priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
-
-               /* Enable Power down mode by programming the PMT regs */
-               if (device_can_wakeup(priv->device))
-                       priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-               else
-                       stmmac_disable_mac(priv->ioaddr);
-       } else {
-               priv->shutdown = 1;
-               /* Although this can appear slightly redundant it actually
-                * makes fast the standby operation and guarantees the driver
-                * working if hibernation is on media. */
-               stmmac_release(dev);
-       }
+       napi_disable(&priv->napi);
+
+       /* Stop TX/RX DMA */
+       priv->hw->dma->stop_tx(priv->ioaddr);
+       priv->hw->dma->stop_rx(priv->ioaddr);
+       /* Clear the Rx/Tx descriptors */
+       priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+                                    dis_ic);
+       priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+       /* Enable Power down mode by programming the PMT regs */
+       if (device_may_wakeup(priv->device))
+               priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+       else
+               stmmac_disable_mac(priv->ioaddr);
 
        spin_unlock(&priv->lock);
        return 0;
 }
 
-static int stmmac_resume(struct platform_device *pdev)
+static int stmmac_resume(struct device *dev)
 {
-       struct net_device *dev = platform_get_drvdata(pdev);
-       struct stmmac_priv *priv = netdev_priv(dev);
-
-       if (!netif_running(dev))
-               return 0;
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
 
-       if (priv->shutdown) {
-               /* Re-open the interface and re-init the MAC/DMA
-                  and the rings (i.e. on hibernation stage) */
-               stmmac_open(dev);
+       if (!netif_running(ndev))
                return 0;
-       }
 
        spin_lock(&priv->lock);
 
@@ -1861,10 +1853,10 @@ static int stmmac_resume(struct platform_device *pdev)
         * is received. Anyway, it's better to manually clear
         * this bit because it can generate problems while resuming
         * from another devices (e.g. serial console). */
-       if (device_can_wakeup(priv->device))
+       if (device_may_wakeup(priv->device))
                priv->hw->mac->pmt(priv->ioaddr, 0);
 
-       netif_device_attach(dev);
+       netif_device_attach(ndev);
 
        /* Enable the MAC and DMA */
        stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1864,59 @@ static int stmmac_resume(struct platform_device *pdev)
        priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
-       priv->tm->timer_start(tmrate);
+       if (likely(priv->tm->enable))
+               priv->tm->timer_start(tmrate);
 #endif
        napi_enable(&priv->napi);
 
        if (priv->phydev)
                phy_start(priv->phydev);
 
-       netif_start_queue(dev);
+       netif_start_queue(ndev);
 
        spin_unlock(&priv->lock);
        return 0;
 }
-#endif
 
-static struct platform_driver stmmac_driver = {
-       .driver = {
-                  .name = STMMAC_RESOURCE_NAME,
-                  },
-       .probe = stmmac_dvr_probe,
-       .remove = stmmac_dvr_remove,
-#ifdef CONFIG_PM
+static int stmmac_freeze(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
        .suspend = stmmac_suspend,
        .resume = stmmac_resume,
-#endif
+       .freeze = stmmac_freeze,
+       .thaw = stmmac_restore,
+       .restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
 
+static struct platform_driver stmmac_driver = {
+       .probe = stmmac_dvr_probe,
+       .remove = stmmac_dvr_remove,
+       .driver = {
+               .name = STMMAC_RESOURCE_NAME,
+               .owner = THIS_MODULE,
+               .pm = &stmmac_pm_ops,
+       },
 };
 
 /**
index d744161..234b406 100644 (file)
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
        int data;
        u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
                        ((phyreg << 6) & (0x000007C0)));
-       regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+       regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
 
        do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
        writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
            (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
            | MII_WRITE;
 
-       value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+       value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
 
 
        /* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
 
        if (priv->phy_reset) {
                pr_debug("stmmac_mdio_reset: calling phy_reset\n");
-               priv->phy_reset(priv->bsp_priv);
+               priv->phy_reset(priv->plat->bsp_priv);
        }
 
        /* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        new_bus->read = &stmmac_mdio_read;
        new_bus->write = &stmmac_mdio_write;
        new_bus->reset = &stmmac_mdio_reset;
-       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
        new_bus->priv = ndev;
        new_bus->irq = irqlist;
        new_bus->phy_mask = priv->phy_mask;
index 30ccbb6..afb79db 100644 (file)
@@ -2728,12 +2728,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
                     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 
-               if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
-                       mac_mode |= tp->mac_mode &
-                                   (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
-                       if (mac_mode & MAC_MODE_APE_TX_EN)
-                               mac_mode |= MAC_MODE_TDE_ENABLE;
-               }
+               if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+                       mac_mode |= MAC_MODE_APE_TX_EN |
+                                   MAC_MODE_APE_RX_EN |
+                                   MAC_MODE_TDE_ENABLE;
 
                tw32_f(MAC_MODE, mac_mode);
                udelay(100);
@@ -6339,13 +6337,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
        kfree(tpr->rx_jmb_buffers);
        tpr->rx_jmb_buffers = NULL;
        if (tpr->rx_std) {
-               pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
-                                   tpr->rx_std, tpr->rx_std_mapping);
+               dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+                                 tpr->rx_std, tpr->rx_std_mapping);
                tpr->rx_std = NULL;
        }
        if (tpr->rx_jmb) {
-               pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
-                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
+               dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+                                 tpr->rx_jmb, tpr->rx_jmb_mapping);
                tpr->rx_jmb = NULL;
        }
 }
@@ -6358,8 +6356,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
        if (!tpr->rx_std_buffers)
                return -ENOMEM;
 
-       tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
-                                          &tpr->rx_std_mapping);
+       tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+                                        TG3_RX_STD_RING_BYTES(tp),
+                                        &tpr->rx_std_mapping,
+                                        GFP_KERNEL);
        if (!tpr->rx_std)
                goto err_out;
 
@@ -6370,9 +6370,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
                if (!tpr->rx_jmb_buffers)
                        goto err_out;
 
-               tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
-                                                  TG3_RX_JMB_RING_BYTES(tp),
-                                                  &tpr->rx_jmb_mapping);
+               tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+                                                TG3_RX_JMB_RING_BYTES(tp),
+                                                &tpr->rx_jmb_mapping,
+                                                GFP_KERNEL);
                if (!tpr->rx_jmb)
                        goto err_out;
        }
@@ -6491,7 +6492,7 @@ static void tg3_free_consistent(struct tg3 *tp)
                struct tg3_napi *tnapi = &tp->napi[i];
 
                if (tnapi->tx_ring) {
-                       pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+                       dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
                                tnapi->tx_ring, tnapi->tx_desc_mapping);
                        tnapi->tx_ring = NULL;
                }
@@ -6500,25 +6501,26 @@ static void tg3_free_consistent(struct tg3 *tp)
                tnapi->tx_buffers = NULL;
 
                if (tnapi->rx_rcb) {
-                       pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
-                                           tnapi->rx_rcb,
-                                           tnapi->rx_rcb_mapping);
+                       dma_free_coherent(&tp->pdev->dev,
+                                         TG3_RX_RCB_RING_BYTES(tp),
+                                         tnapi->rx_rcb,
+                                         tnapi->rx_rcb_mapping);
                        tnapi->rx_rcb = NULL;
                }
 
                tg3_rx_prodring_fini(tp, &tnapi->prodring);
 
                if (tnapi->hw_status) {
-                       pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
-                                           tnapi->hw_status,
-                                           tnapi->status_mapping);
+                       dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+                                         tnapi->hw_status,
+                                         tnapi->status_mapping);
                        tnapi->hw_status = NULL;
                }
        }
 
        if (tp->hw_stats) {
-               pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
-                                   tp->hw_stats, tp->stats_mapping);
+               dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+                                 tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
 }
@@ -6531,9 +6533,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
        int i;
 
-       tp->hw_stats = pci_alloc_consistent(tp->pdev,
-                                           sizeof(struct tg3_hw_stats),
-                                           &tp->stats_mapping);
+       tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+                                         sizeof(struct tg3_hw_stats),
+                                         &tp->stats_mapping,
+                                         GFP_KERNEL);
        if (!tp->hw_stats)
                goto err_out;
 
@@ -6543,9 +6546,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;
 
-               tnapi->hw_status = pci_alloc_consistent(tp->pdev,
-                                                       TG3_HW_STATUS_SIZE,
-                                                       &tnapi->status_mapping);
+               tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+                                                     TG3_HW_STATUS_SIZE,
+                                                     &tnapi->status_mapping,
+                                                     GFP_KERNEL);
                if (!tnapi->hw_status)
                        goto err_out;
 
@@ -6566,9 +6570,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                        if (!tnapi->tx_buffers)
                                goto err_out;
 
-                       tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
-                                                             TG3_TX_RING_BYTES,
-                                                      &tnapi->tx_desc_mapping);
+                       tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+                                                           TG3_TX_RING_BYTES,
+                                                       &tnapi->tx_desc_mapping,
+                                                           GFP_KERNEL);
                        if (!tnapi->tx_ring)
                                goto err_out;
                }
@@ -6601,9 +6606,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
                        continue;
 
-               tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
-                                                    TG3_RX_RCB_RING_BYTES(tp),
-                                                    &tnapi->rx_rcb_mapping);
+               tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+                                                  TG3_RX_RCB_RING_BYTES(tp),
+                                                  &tnapi->rx_rcb_mapping,
+                                                  GFP_KERNEL);
                if (!tnapi->rx_rcb)
                        goto err_out;
 
@@ -6987,7 +6993,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
                if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
-                       pcie_set_readrq(tp->pdev, 4096);
+                       pcie_set_readrq(tp->pdev, tp->pcie_readrq);
                else {
                        pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                              tp->pci_cacheline_sz);
@@ -7181,7 +7187,7 @@ static int tg3_chip_reset(struct tg3 *tp)
                                      tp->pcie_cap + PCI_EXP_DEVCTL,
                                      val16);
 
-               pcie_set_readrq(tp->pdev, 4096);
+               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
                /* Clear error status */
                pci_write_config_word(tp->pdev,
@@ -7222,19 +7228,21 @@ static int tg3_chip_reset(struct tg3 *tp)
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
 
+       if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+               tp->mac_mode = MAC_MODE_APE_TX_EN |
+                              MAC_MODE_APE_RX_EN |
+                              MAC_MODE_TDE_ENABLE;
+
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
-               tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
-               tw32_f(MAC_MODE, tp->mac_mode);
+               tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+               val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
-               tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
-               tw32_f(MAC_MODE, tp->mac_mode);
-       } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
-               tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
-               if (tp->mac_mode & MAC_MODE_APE_TX_EN)
-                       tp->mac_mode |= MAC_MODE_TDE_ENABLE;
-               tw32_f(MAC_MODE, tp->mac_mode);
+               tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+               val = tp->mac_mode;
        } else
-               tw32_f(MAC_MODE, 0);
+               val = 0;
+
+       tw32_f(MAC_MODE, val);
        udelay(40);
 
        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7860,18 +7868,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(GRC_MODE, grc_mode);
        }
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
-               u32 grc_mode = tr32(GRC_MODE);
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+               if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+                       u32 grc_mode = tr32(GRC_MODE);
 
-               /* Access the lower 1K of PL PCIE block registers. */
-               val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
-               tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+                       /* Access the lower 1K of PL PCIE block registers. */
+                       val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+                       tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 
-               val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
-               tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
-                    val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+                       val = tr32(TG3_PCIE_TLDLPL_PORT +
+                                  TG3_PCIE_PL_LO_PHYCTL5);
+                       tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+                            val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
 
-               tw32(GRC_MODE, grc_mode);
+                       tw32(GRC_MODE, grc_mode);
+               }
 
                val = tr32(TG3_CPMU_LSPD_10MB_CLK);
                val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -8162,8 +8173,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
                      RDMAC_MODE_LNGREAD_ENAB);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
                rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8213,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                val = tr32(TG3_RDMA_RSRVCTRL_REG);
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+                       val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
+                       val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+               }
                tw32(TG3_RDMA_RSRVCTRL_REG,
                     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
        }
@@ -8280,7 +8294,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        }
 
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
-               tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+               tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;
        tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9045,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
                pci_disable_msix(tp->pdev);
                return false;
        }
-       if (tp->irq_cnt > 1)
+
+       if (tp->irq_cnt > 1) {
                tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+                       tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+                       netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+               }
+       }
 
        return true;
 }
@@ -12411,8 +12431,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 
-               if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+               if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
+                   ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 
@@ -13359,7 +13380,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
                tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 
-               pcie_set_readrq(tp->pdev, 4096);
+               tp->pcie_readrq = 4096;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+                       u16 word;
+
+                       pci_read_config_word(tp->pdev,
+                                            tp->pcie_cap + PCI_EXP_LNKSTA,
+                                            &word);
+                       switch (word & PCI_EXP_LNKSTA_CLS) {
+                       case PCI_EXP_LNKSTA_CLS_2_5GB:
+                               word &= PCI_EXP_LNKSTA_NLW;
+                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+                               switch (word) {
+                               case 2:
+                                       tp->pcie_readrq = 2048;
+                                       break;
+                               case 4:
+                                       tp->pcie_readrq = 1024;
+                                       break;
+                               }
+                               break;
+
+                       case PCI_EXP_LNKSTA_CLS_5_0GB:
+                               word &= PCI_EXP_LNKSTA_NLW;
+                               word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+                               switch (word) {
+                               case 1:
+                                       tp->pcie_readrq = 2048;
+                                       break;
+                               case 2:
+                                       tp->pcie_readrq = 1024;
+                                       break;
+                               case 4:
+                                       tp->pcie_readrq = 512;
+                                       break;
+                               }
+                       }
+               }
+
+               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
                pci_read_config_word(tp->pdev,
                                     tp->pcie_cap + PCI_EXP_LNKCTL,
@@ -13722,8 +13781,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
        /* Preserve the APE MAC_MODE bits */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
-               tp->mac_mode = tr32(MAC_MODE) |
-                              MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+               tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = TG3_DEF_MAC_MODE;
 
@@ -14159,7 +14217,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;
 
-       buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
+       buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+                                &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
@@ -14343,7 +14402,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
        }
 
 out:
-       pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
+       dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
 out_nofree:
        return ret;
 }
index 4a19748..59b0e09 100644 (file)
 
 #define TG3_RDMA_RSRVCTRL_REG          0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX         0x00000004
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B   0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK   0xffe00000
 /* 0x4904 --> 0x4910 unused */
 
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL     0x00004910
@@ -2562,10 +2564,6 @@ struct ring_info {
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
-struct tg3_config_info {
-       u32                             flags;
-};
-
 struct tg3_link_config {
        /* Describes what we're trying to get. */
        u32                             advertising;
@@ -2713,17 +2711,17 @@ struct tg3_napi {
        u32                             last_irq_tag;
        u32                             int_mbox;
        u32                             coal_now;
-       u32                             tx_prod;
-       u32                             tx_cons;
-       u32                             tx_pending;
-       u32                             prodmbox;
 
-       u32                             consmbox;
+       u32                             consmbox ____cacheline_aligned;
        u32                             rx_rcb_ptr;
        u16                             *rx_rcb_prod_idx;
        struct tg3_rx_prodring_set      prodring;
-
        struct tg3_rx_buffer_desc       *rx_rcb;
+
+       u32                             tx_prod ____cacheline_aligned;
+       u32                             tx_cons;
+       u32                             tx_pending;
+       u32                             prodmbox;
        struct tg3_tx_buffer_desc       *tx_ring;
        struct ring_info                *tx_buffers;
 
@@ -2946,6 +2944,7 @@ struct tg3 {
        int                             pcix_cap;
        int                             pcie_cap;
        };
+       int                             pcie_readrq;
 
        struct mii_bus                  *mdio_bus;
        int                             mdio_irq[PHY_MAX_ADDR];
index 9ddaea6..8e17fc8 100644 (file)
@@ -553,7 +553,7 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
        return -EOPNOTSUPP;
 }
 
-
+#ifdef VMXNET3_RSS
 static int
 vmxnet3_get_rss_indir(struct net_device *netdev,
                      struct ethtool_rxfh_indir *p)
@@ -598,6 +598,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
        return 0;
 
 }
+#endif
 
 static struct ethtool_ops vmxnet3_ethtool_ops = {
        .get_settings      = vmxnet3_get_settings,
@@ -623,8 +624,10 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
        .get_ringparam     = vmxnet3_get_ringparam,
        .set_ringparam     = vmxnet3_set_ringparam,
        .get_rxnfc         = vmxnet3_get_rxnfc,
+#ifdef VMXNET3_RSS
        .get_rxfh_indir    = vmxnet3_get_rss_indir,
        .set_rxfh_indir    = vmxnet3_set_rss_indir,
+#endif
 };
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
index 409c2e6..a0241fe 100644 (file)
@@ -1219,14 +1219,12 @@ vxge_hw_device_initialize(
        if (status != VXGE_HW_OK)
                goto exit;
 
-       hldev = (struct __vxge_hw_device *)
-                       vmalloc(sizeof(struct __vxge_hw_device));
+       hldev = vzalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }
 
-       memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;
 
        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -2064,15 +2062,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
                 * allocate new memblock and its private part at once.
                 * This helps to minimize memory usage a lot. */
                mempool->memblocks_priv_arr[i] =
-                               vmalloc(mempool->items_priv_size * n_items);
+                               vzalloc(mempool->items_priv_size * n_items);
                if (mempool->memblocks_priv_arr[i] == NULL) {
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
                        goto exit;
                }
 
-               memset(mempool->memblocks_priv_arr[i], 0,
-                            mempool->items_priv_size * n_items);
-
                /* allocate DMA-capable memblock */
                mempool->memblocks_arr[i] =
                        __vxge_hw_blockpool_malloc(mempool->devh,
@@ -2144,13 +2139,11 @@ __vxge_hw_mempool_create(
                goto exit;
        }
 
-       mempool = (struct vxge_hw_mempool *)
-                       vmalloc(sizeof(struct vxge_hw_mempool));
+       mempool = vzalloc(sizeof(struct vxge_hw_mempool));
        if (mempool == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }
-       memset(mempool, 0, sizeof(struct vxge_hw_mempool));
 
        mempool->devh                   = devh;
        mempool->memblock_size          = memblock_size;
@@ -2170,31 +2163,27 @@ __vxge_hw_mempool_create(
 
        /* allocate array of memblocks */
        mempool->memblocks_arr =
-               (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+               vzalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
-       memset(mempool->memblocks_arr, 0,
-               sizeof(void *) * mempool->memblocks_max);
 
        /* allocate array of private parts of items per memblocks */
        mempool->memblocks_priv_arr =
-               (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+               vzalloc(sizeof(void *) * mempool->memblocks_max);
        if (mempool->memblocks_priv_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
-       memset(mempool->memblocks_priv_arr, 0,
-                   sizeof(void *) * mempool->memblocks_max);
 
        /* allocate array of memblocks DMA objects */
-       mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
-               vmalloc(sizeof(struct vxge_hw_mempool_dma) *
+       mempool->memblocks_dma_arr =
+               vzalloc(sizeof(struct vxge_hw_mempool_dma) *
                        mempool->memblocks_max);
 
        if (mempool->memblocks_dma_arr == NULL) {
@@ -2203,20 +2192,15 @@ __vxge_hw_mempool_create(
                mempool = NULL;
                goto exit;
        }
-       memset(mempool->memblocks_dma_arr, 0,
-                       sizeof(struct vxge_hw_mempool_dma) *
-                       mempool->memblocks_max);
 
        /* allocate hash array of items */
-       mempool->items_arr =
-               (void **) vmalloc(sizeof(void *) * mempool->items_max);
+       mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
        if (mempool->items_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                mempool = NULL;
                goto exit;
        }
-       memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
 
        /* calculate initial number of memblocks */
        memblocks_to_allocate = (mempool->items_initial +
@@ -4271,15 +4255,12 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
        if (status != VXGE_HW_OK)
                goto vpath_open_exit1;
 
-       vp = (struct __vxge_hw_vpath_handle *)
-               vmalloc(sizeof(struct __vxge_hw_vpath_handle));
+       vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
        if (vp == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto vpath_open_exit2;
        }
 
-       memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
-
        vp->vpath = vpath;
 
        if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -5080,8 +5061,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
                                item);
 
        if (entry == NULL)
-               entry = (struct __vxge_hw_blockpool_entry *)
-                       vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+               entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
        else
                list_del(&entry->item);
 
@@ -5197,8 +5177,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
                                        item);
 
                if (entry == NULL)
-                       entry = (struct __vxge_hw_blockpool_entry *)
-                               vmalloc(sizeof(
+                       entry = vmalloc(sizeof(
                                        struct __vxge_hw_blockpool_entry));
                else
                        list_del(&entry->item);
index 5cba4a6..a21dae1 100644 (file)
@@ -4602,9 +4602,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        /* Copy the station mac address to the list */
        for (i = 0; i < vdev->no_of_vpath; i++) {
-               entry = (struct vxge_mac_addrs *)
-                               kzalloc(sizeof(struct vxge_mac_addrs),
-                                       GFP_KERNEL);
+               entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
                if (NULL == entry) {
                        vxge_debug_init(VXGE_ERR,
                                "%s: mac_addr_list : memory allocation failed",
index 30f8d40..6a9b660 100644 (file)
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
 
        /* Allocate a single memory block for values and addresses. */
        count16 = 2*count;
+       /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
        a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
                                   GFP_KERNEL);
        if (!a16) {
index 0f19d54..c9f13b9 100644 (file)
@@ -1188,7 +1188,8 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
        spin_lock_irqsave(&card->ipm_lock, flags);
        list_for_each(l, &card->ipm_list) {
                ipm = list_entry(l, struct lcs_ipm_list, list);
-               for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
+               for (im4 = rcu_dereference(in4_dev->mc_list);
+                    im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
                        lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
                        if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
                             (memcmp(buf, &ipm->ipm.mac_addr,
@@ -1233,7 +1234,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
        unsigned long flags;
 
        LCS_DBF_TEXT(4, trace, "setmclst");
-       for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+       for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+            im4 = rcu_dereference(im4->next_rcu)) {
                lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
                ipm = lcs_check_addr_entry(card, im4, buf);
                if (ipm != NULL)
@@ -1269,10 +1271,10 @@ lcs_register_mc_addresses(void *data)
        in4_dev = in_dev_get(card->dev);
        if (in4_dev == NULL)
                goto out;
-       read_lock(&in4_dev->mc_list_lock);
+       rcu_read_lock();
        lcs_remove_mc_addresses(card,in4_dev);
        lcs_set_mc_addresses(card, in4_dev);
-       read_unlock(&in4_dev->mc_list_lock);
+       rcu_read_unlock();
        in_dev_put(in4_dev);
 
        netif_carrier_off(card->dev);
index 42fa783..b5e967c 100644 (file)
@@ -372,7 +372,7 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
        i = simple_strtoul(buf, &tmp, 16);
        if ((i == 0) || (i == 1)) {
                if (i == card->options.performance_stats)
-                       goto out;;
+                       goto out;
                card->options.performance_stats = i;
                if (i == 0)
                        memset(&card->perf_stats, 0,
index 847e879..7a7a1b6 100644 (file)
@@ -849,8 +849,6 @@ static int qeth_l2_open(struct net_device *dev)
        card->state = CARD_STATE_UP;
        netif_start_queue(dev);
 
-       if (!card->lan_online && netif_carrier_ok(dev))
-               netif_carrier_off(dev);
        if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
                napi_enable(&card->napi);
                napi_schedule(&card->napi);
@@ -1013,13 +1011,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                        dev_warn(&card->gdev->dev,
                                "The LAN is offline\n");
                        card->lan_online = 0;
-                       goto out;
+                       goto contin;
                }
                rc = -ENODEV;
                goto out_remove;
        } else
                card->lan_online = 1;
 
+contin:
        if ((card->info.type == QETH_CARD_TYPE_OSD) ||
            (card->info.type == QETH_CARD_TYPE_OSX))
                /* configure isolation level */
@@ -1038,7 +1037,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                goto out_remove;
        }
        card->state = CARD_STATE_SOFTSETUP;
-       netif_carrier_on(card->dev);
+       if (card->lan_online)
+               netif_carrier_on(card->dev);
+       else
+               netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
        if (recover_flag == CARD_STATE_RECOVER) {
@@ -1055,7 +1057,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-out:
        mutex_unlock(&card->conf_mutex);
        mutex_unlock(&card->discipline_mutex);
        return 0;
index 74d1401..a1abb37 100644 (file)
@@ -1796,7 +1796,8 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
        char buf[MAX_ADDR_LEN];
 
        QETH_CARD_TEXT(card, 4, "addmc");
-       for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+       for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+            im4 = rcu_dereference(im4->next_rcu)) {
                qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
                ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
                if (!ipm)
@@ -1828,9 +1829,9 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
                in_dev = in_dev_get(netdev);
                if (!in_dev)
                        continue;
-               read_lock(&in_dev->mc_list_lock);
+               rcu_read_lock();
                qeth_l3_add_mc(card, in_dev);
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
                in_dev_put(in_dev);
        }
 }
@@ -1843,10 +1844,10 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
        in4_dev = in_dev_get(card->dev);
        if (in4_dev == NULL)
                return;
-       read_lock(&in4_dev->mc_list_lock);
+       rcu_read_lock();
        qeth_l3_add_mc(card, in4_dev);
        qeth_l3_add_vlan_mc(card);
-       read_unlock(&in4_dev->mc_list_lock);
+       rcu_read_unlock();
        in_dev_put(in4_dev);
 }
 
@@ -2938,6 +2939,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
        /*fix header to TSO values ...*/
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+       hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
        /*set values which are fix for the first approach ...*/
        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
        hdr->ext.imb_hdr_no  = 1;
@@ -3176,8 +3178,6 @@ static int qeth_l3_open(struct net_device *dev)
        card->state = CARD_STATE_UP;
        netif_start_queue(dev);
 
-       if (!card->lan_online && netif_carrier_ok(dev))
-               netif_carrier_off(dev);
        if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
                napi_enable(&card->napi);
                napi_schedule(&card->napi);
@@ -3449,13 +3449,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                        dev_warn(&card->gdev->dev,
                                "The LAN is offline\n");
                        card->lan_online = 0;
-                       goto out;
+                       goto contin;
                }
                rc = -ENODEV;
                goto out_remove;
        } else
                card->lan_online = 1;
 
+contin:
        rc = qeth_l3_setadapter_parms(card);
        if (rc)
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -3480,10 +3481,13 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                goto out_remove;
        }
        card->state = CARD_STATE_SOFTSETUP;
-       netif_carrier_on(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
        qeth_l3_set_ip_addr_list(card);
+       if (card->lan_online)
+               netif_carrier_on(card->dev);
+       else
+               netif_carrier_off(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode)
                        qeth_l3_open(card->dev);
@@ -3496,7 +3500,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-out:
        mutex_unlock(&card->conf_mutex);
        mutex_unlock(&card->discipline_mutex);
        return 0;
index 8e429d0..0c99776 100644 (file)
@@ -364,7 +364,7 @@ struct ipv6_pinfo {
 
        __u32                   dst_cookie;
 
-       struct ipv6_mc_socklist *ipv6_mc_list;
+       struct ipv6_mc_socklist __rcu *ipv6_mc_list;
        struct ipv6_ac_socklist *ipv6_ac_list;
        struct ipv6_fl_socklist *ipv6_fl_list;
 
index b45c1b8..4b0c7f3 100644 (file)
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
        __QUEUE_STATE_XOFF,
        __QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)          | \
+                                   (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -503,6 +505,10 @@ struct netdev_queue {
        struct Qdisc            *qdisc;
        unsigned long           state;
        struct Qdisc            *qdisc_sleeping;
+#ifdef CONFIG_RPS
+       struct kobject          kobj;
+#endif
+
 /*
  * write mostly part
  */
@@ -596,6 +602,32 @@ struct netdev_rx_queue {
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_XPS
+/*
+ * This structure holds an XPS map which can be of variable length.  The
+ * map is an array of queues.
+ */
+struct xps_map {
+       unsigned int len;
+       unsigned int alloc_len;
+       struct rcu_head rcu;
+       u16 queues[0];
+};
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))   \
+    / sizeof(u16))
+
+/*
+ * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+ */
+struct xps_dev_maps {
+       struct rcu_head rcu;
+       struct xps_map __rcu *cpu_map[0];
+};
+#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +               \
+    (nr_cpu_ids * sizeof(struct xps_map *)))
+#endif /* CONFIG_XPS */
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1016,6 +1048,10 @@ struct net_device {
        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
        spinlock_t              tx_global_lock;
 
+#ifdef CONFIG_XPS
+       struct xps_dev_maps __rcu *xps_maps;
+#endif
+
        /* These may be needed for future network-power-down code. */
 
        /*
@@ -1599,9 +1635,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-       return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+       return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
index e6ba898..19f37a6 100644 (file)
@@ -386,9 +386,10 @@ struct sk_buff {
 #else
        __u8                    deliver_no_wcard:1;
 #endif
+       __u8                    ooo_okay:1;
        kmemcheck_bitfield_end(flags2);
 
-       /* 0/14 bit hole */
+       /* 0/13 bit hole */
 
 #ifdef CONFIG_NET_DMA
        dma_cookie_t            dma_cookie;
index d66c617..e103529 100644 (file)
@@ -40,9 +40,9 @@ struct plat_stmmacenet_data {
        int pmt;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
-#ifdef CONFIG_STM_DRIVERS
-       struct stm_pad_config *pad_config;
-#endif
+       int (*init)(struct platform_device *pdev);
+       void (*exit)(struct platform_device *pdev);
+       void *custom_cfg;
        void *bsp_priv;
 };
 
index f95ff8d..04977ee 100644 (file)
@@ -89,10 +89,11 @@ struct ip6_sf_socklist {
 struct ipv6_mc_socklist {
        struct in6_addr         addr;
        int                     ifindex;
-       struct ipv6_mc_socklist *next;
+       struct ipv6_mc_socklist __rcu *next;
        rwlock_t                sflock;
        unsigned int            sfmode;         /* MCAST_{INCLUDE,EXCLUDE} */
        struct ip6_sf_socklist  *sflist;
+       struct rcu_head         rcu;
 };
 
 struct ip6_sf_list {
index 35be0bb..4093ca7 100644 (file)
@@ -92,8 +92,10 @@ extern void  rtnl_link_unregister(struct rtnl_link_ops *ops);
  *                    specific netlink attributes.
  *     @get_link_af_size: Function to calculate size of address family specific
  *                        netlink attributes exlusive the container attribute.
- *     @parse_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
- *                     net_device accordingly.
+ *     @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr
+ *                        for invalid configuration settings.
+ *     @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
+ *                   net_device accordingly.
  */
 struct rtnl_af_ops {
        struct list_head        list;
@@ -103,8 +105,10 @@ struct rtnl_af_ops {
                                                const struct net_device *dev);
        size_t                  (*get_link_af_size)(const struct net_device *dev);
 
-       int                     (*parse_link_af)(struct net_device *dev,
-                                                const struct nlattr *attr);
+       int                     (*validate_link_af)(const struct net_device *dev,
+                                                   const struct nlattr *attr);
+       int                     (*set_link_af)(struct net_device *dev,
+                                              const struct nlattr *attr);
 };
 
 extern int     __rtnl_af_register(struct rtnl_af_ops *ops);
index 3165650..745460f 100644 (file)
 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
  */
-#define SCM_MAX_FD     255
+#define SCM_MAX_FD     253
 
 struct scm_fp_list {
        struct list_head        list;
-       int                     count;
+       short                   count;
+       short                   max;
        struct file             *fp[SCM_MAX_FD];
 };
 
index 2c55a7e..c01dc99 100644 (file)
@@ -111,9 +111,6 @@ typedef enum {
        SCTP_CMD_LAST
 } sctp_verb_t;
 
-#define SCTP_CMD_MAX           (SCTP_CMD_LAST - 1)
-#define SCTP_CMD_NUM_VERBS     (SCTP_CMD_MAX + 1)
-
 /* How many commands can you put in an sctp_cmd_seq_t?
  * This is a rather arbitrary number, ideally derived from a careful
  * analysis of the state functions, but in reality just taken from
index 6390884..c70d8cc 100644 (file)
@@ -61,7 +61,6 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
  * symbols.  CIDs are dense through SCTP_CID_BASE_MAX.
  */
 #define SCTP_CID_BASE_MAX              SCTP_CID_SHUTDOWN_COMPLETE
-#define SCTP_CID_MAX                   SCTP_CID_ASCONF_ACK
 
 #define SCTP_NUM_BASE_CHUNK_TYPES      (SCTP_CID_BASE_MAX + 1)
 
@@ -86,9 +85,6 @@ typedef enum {
 
 } sctp_event_t;
 
-#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE
-#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1)
-
 /* As a convenience for the state machine, we append SCTP_EVENT_* and
  * SCTP_ULP_* to the list of possible chunks.
  */
@@ -162,9 +158,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive)
                                - (unsigned long)(c->chunk_hdr)\
                                - sizeof(sctp_data_chunk_t)))
 
-#define SCTP_MAX_ERROR_CAUSE  SCTP_ERROR_NONEXIST_IP
-#define SCTP_NUM_ERROR_CAUSE  10
-
 /* Internal error codes */
 typedef enum {
 
@@ -266,7 +259,6 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
 #define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
 #define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
 #define SCTP_TSN_MAP_SIZE 4096
-#define SCTP_TSN_MAX_GAP  65535
 
 /* We will not record more than this many duplicate TSNs between two
  * SACKs.  The minimum PMTU is 576.  Remove all the headers and there
@@ -301,9 +293,6 @@ enum { SCTP_MAX_GABS = 16 };
 
 #define SCTP_CLOCK_GRANULARITY 1       /* 1 jiffy */
 
-#define SCTP_DEF_MAX_INIT 6
-#define SCTP_DEF_MAX_SEND 10
-
 #define SCTP_DEFAULT_COOKIE_LIFE       (60 * 1000) /* 60 seconds */
 
 #define SCTP_DEFAULT_MINWINDOW 1500    /* default minimum rwnd size */
@@ -317,9 +306,6 @@ enum { SCTP_MAX_GABS = 16 };
                                         */
 #define SCTP_DEFAULT_MINSEGMENT 512    /* MTU size ... if no mtu disc */
 #define SCTP_HOW_MANY_SECRETS 2                /* How many secrets I keep */
-#define SCTP_HOW_LONG_COOKIE_LIVE 3600 /* How many seconds the current
-                                        * secret will live?
-                                        */
 #define SCTP_SECRET_SIZE 32            /* Number of octets in a 256 bits. */
 
 #define SCTP_SIGNATURE_SIZE 20         /* size of a SLA-1 signature */
index 69fef4f..cc9185c 100644 (file)
@@ -261,8 +261,6 @@ extern struct sctp_globals {
 #define sctp_assoc_hashsize            (sctp_globals.assoc_hashsize)
 #define sctp_assoc_hashtable           (sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize             (sctp_globals.port_hashsize)
-#define sctp_port_rover                        (sctp_globals.port_rover)
-#define sctp_port_alloc_lock           (sctp_globals.port_alloc_lock)
 #define sctp_port_hashtable            (sctp_globals.port_hashtable)
 #define sctp_local_addr_list           (sctp_globals.local_addr_list)
 #define sctp_local_addr_lock           (sctp_globals.addr_list_lock)
index 1479cb4..a06119a 100644 (file)
@@ -315,6 +315,8 @@ extern struct list_head x25_route_list;
 extern rwlock_t x25_route_list_lock;
 extern struct list_head x25_forward_list;
 extern rwlock_t x25_forward_list_lock;
+extern struct list_head x25_neigh_list;
+extern rwlock_t x25_neigh_list_lock;
 
 extern int x25_proc_init(void);
 extern void x25_proc_exit(void);
index 54b2832..7fa5b00 100644 (file)
@@ -806,7 +806,7 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
                port = htons(fl->fl_mh_type);
                break;
        case IPPROTO_GRE:
-               port = htonl(fl->fl_gre_key) >> 16;
+               port = htons(ntohl(fl->fl_gre_key) >> 16);
                break;
        default:
                port = 0;       /*XXX*/
@@ -830,7 +830,7 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
                port = htons(fl->fl_icmp_code);
                break;
        case IPPROTO_GRE:
-               port = htonl(fl->fl_gre_key) & 0xffff;
+               port = htons(ntohl(fl->fl_gre_key) & 0xffff);
                break;
        default:
                port = 0;       /*XXX*/
index dc10713..6e64f7c 100644 (file)
@@ -341,9 +341,6 @@ static void vlan_transfer_features(struct net_device *dev,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
-       vlandev->real_num_tx_queues = dev->real_num_tx_queues;
-       BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
-
        if (old_features != vlandev->features)
                netdev_features_change(vlandev);
 }
index 55fd82e..126c2af 100644 (file)
@@ -220,6 +220,11 @@ config RPS
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
+config XPS
+       boolean
+       depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+       default y
+
 menu "Network testing"
 
 config NET_PKTGEN
index 381b8e2..3259d2c 100644 (file)
@@ -1557,12 +1557,19 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+       int rc;
+
        if (txq < 1 || txq > dev->num_tx_queues)
                return -EINVAL;
 
        if (dev->reg_state == NETREG_REGISTERED) {
                ASSERT_RTNL();
 
+               rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
+                                                 txq);
+               if (rc)
+                       return rc;
+
                if (txq < dev->real_num_tx_queues)
                        qdisc_reset_all_tx_gt(dev, txq);
        }
@@ -2142,26 +2149,70 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
        return queue_index;
 }
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       int queue_index = -1;
+
+       rcu_read_lock();
+       dev_maps = rcu_dereference(dev->xps_maps);
+       if (dev_maps) {
+               map = rcu_dereference(
+                   dev_maps->cpu_map[raw_smp_processor_id()]);
+               if (map) {
+                       if (map->len == 1)
+                               queue_index = map->queues[0];
+                       else {
+                               u32 hash;
+                               if (skb->sk && skb->sk->sk_hash)
+                                       hash = skb->sk->sk_hash;
+                               else
+                                       hash = (__force u16) skb->protocol ^
+                                           skb->rxhash;
+                               hash = jhash_1word(hash, hashrnd);
+                               queue_index = map->queues[
+                                   ((u64)hash * map->len) >> 32];
+                       }
+                       if (unlikely(queue_index >= dev->real_num_tx_queues))
+                               queue_index = -1;
+               }
+       }
+       rcu_read_unlock();
+
+       return queue_index;
+#else
+       return -1;
+#endif
+}
+
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
        int queue_index;
        const struct net_device_ops *ops = dev->netdev_ops;
 
-       if (ops->ndo_select_queue) {
+       if (dev->real_num_tx_queues == 1)
+               queue_index = 0;
+       else if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb);
                queue_index = dev_cap_txqueue(dev, queue_index);
        } else {
                struct sock *sk = skb->sk;
                queue_index = sk_tx_queue_get(sk);
-               if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
-                       queue_index = 0;
-                       if (dev->real_num_tx_queues > 1)
+               if (queue_index < 0 || skb->ooo_okay ||
+                   queue_index >= dev->real_num_tx_queues) {
+                       int old_index = queue_index;
+
+                       queue_index = get_xps_queue(dev, skb);
+                       if (queue_index < 0)
                                queue_index = skb_tx_hash(dev, skb);
 
-                       if (sk) {
-                               struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+                       if (queue_index != old_index && sk) {
+                               struct dst_entry *dst =
+                                   rcu_dereference_check(sk->sk_dst_cache, 1);
 
                                if (dst && skb_dst(skb) == dst)
                                        sk_tx_queue_set(sk, queue_index);
@@ -5037,9 +5088,9 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
+#ifdef CONFIG_RPS
 static int netif_alloc_rx_queues(struct net_device *dev)
 {
-#ifdef CONFIG_RPS
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
 
@@ -5054,14 +5105,15 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
        for (i = 0; i < count; i++)
                rx[i].dev = dev;
-#endif
        return 0;
 }
+#endif
 
 static int netif_alloc_netdev_queues(struct net_device *dev)
 {
        unsigned int count = dev->num_tx_queues;
        struct netdev_queue *tx;
+       int i;
 
        BUG_ON(count < 1);
 
@@ -5072,6 +5124,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
                return -ENOMEM;
        }
        dev->_tx = tx;
+
+       for (i = 0; i < count; i++)
+               tx[i].dev = dev;
+
        return 0;
 }
 
@@ -5079,8 +5135,6 @@ static void netdev_init_one_queue(struct net_device *dev,
                                  struct netdev_queue *queue,
                                  void *_unused)
 {
-       queue->dev = dev;
-
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
index 7abeb7c..f85cee3 100644 (file)
@@ -751,10 +751,12 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
 
        return error;
 }
+#endif /* CONFIG_RPS */
 
 int
 net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 {
+#ifdef CONFIG_RPS
        int i;
        int error = 0;
 
@@ -770,23 +772,412 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
                kobject_put(&net->_rx[i].kobj);
 
        return error;
+#else
+       return 0;
+#endif
+}
+
+#ifdef CONFIG_XPS
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct netdev_queue_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct netdev_queue *queue,
+           struct netdev_queue_attribute *attr, char *buf);
+       ssize_t (*store)(struct netdev_queue *queue,
+           struct netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_netdev_queue_attr(_attr) container_of(_attr,                \
+    struct netdev_queue_attribute, attr)
+
+#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
+
+static ssize_t netdev_queue_attr_show(struct kobject *kobj,
+                                     struct attribute *attr, char *buf)
+{
+       struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+       struct netdev_queue *queue = to_netdev_queue(kobj);
+
+       if (!attribute->show)
+               return -EIO;
+
+       return attribute->show(queue, attribute, buf);
 }
 
-static int rx_queue_register_kobjects(struct net_device *net)
+static ssize_t netdev_queue_attr_store(struct kobject *kobj,
+                                      struct attribute *attr,
+                                      const char *buf, size_t count)
 {
+       struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+       struct netdev_queue *queue = to_netdev_queue(kobj);
+
+       if (!attribute->store)
+               return -EIO;
+
+       return attribute->store(queue, attribute, buf, count);
+}
+
+static const struct sysfs_ops netdev_queue_sysfs_ops = {
+       .show = netdev_queue_attr_show,
+       .store = netdev_queue_attr_store,
+};
+
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+       struct net_device *dev = queue->dev;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++)
+               if (queue == &dev->_tx[i])
+                       break;
+
+       BUG_ON(i >= dev->num_tx_queues);
+
+       return i;
+}
+
+
+static ssize_t show_xps_map(struct netdev_queue *queue,
+                           struct netdev_queue_attribute *attribute, char *buf)
+{
+       struct net_device *dev = queue->dev;
+       struct xps_dev_maps *dev_maps;
+       cpumask_var_t mask;
+       unsigned long index;
+       size_t len = 0;
+       int i;
+
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       index = get_netdev_queue_index(queue);
+
+       rcu_read_lock();
+       dev_maps = rcu_dereference(dev->xps_maps);
+       if (dev_maps) {
+               for_each_possible_cpu(i) {
+                       struct xps_map *map =
+                           rcu_dereference(dev_maps->cpu_map[i]);
+                       if (map) {
+                               int j;
+                               for (j = 0; j < map->len; j++) {
+                                       if (map->queues[j] == index) {
+                                               cpumask_set_cpu(i, mask);
+                                               break;
+                                       }
+                               }
+                       }
+               }
+       }
+       rcu_read_unlock();
+
+       len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+       if (PAGE_SIZE - len < 3) {
+               free_cpumask_var(mask);
+               return -EINVAL;
+       }
+
+       free_cpumask_var(mask);
+       len += sprintf(buf + len, "\n");
+       return len;
+}
+
+static void xps_map_release(struct rcu_head *rcu)
+{
+       struct xps_map *map = container_of(rcu, struct xps_map, rcu);
+
+       kfree(map);
+}
+
+static void xps_dev_maps_release(struct rcu_head *rcu)
+{
+       struct xps_dev_maps *dev_maps =
+           container_of(rcu, struct xps_dev_maps, rcu);
+
+       kfree(dev_maps);
+}
+
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)            \
+       rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static ssize_t store_xps_map(struct netdev_queue *queue,
+                     struct netdev_queue_attribute *attribute,
+                     const char *buf, size_t len)
+{
+       struct net_device *dev = queue->dev;
+       cpumask_var_t mask;
+       int err, i, cpu, pos, map_len, alloc_len, need_set;
+       unsigned long index;
+       struct xps_map *map, *new_map;
+       struct xps_dev_maps *dev_maps, *new_dev_maps;
+       int nonempty = 0;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       index = get_netdev_queue_index(queue);
+
+       err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+       if (err) {
+               free_cpumask_var(mask);
+               return err;
+       }
+
+       new_dev_maps = kzalloc(max_t(unsigned,
+           XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
+       if (!new_dev_maps) {
+               free_cpumask_var(mask);
+               return -ENOMEM;
+       }
+
+       mutex_lock(&xps_map_mutex);
+
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       for_each_possible_cpu(cpu) {
+               map = dev_maps ?
+                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+               new_map = map;
+               if (map) {
+                       for (pos = 0; pos < map->len; pos++)
+                               if (map->queues[pos] == index)
+                                       break;
+                       map_len = map->len;
+                       alloc_len = map->alloc_len;
+               } else
+                       pos = map_len = alloc_len = 0;
+
+               need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
+
+               if (need_set && pos >= map_len) {
+                       /* Need to add queue to this CPU's map */
+                       if (map_len >= alloc_len) {
+                               alloc_len = alloc_len ?
+                                   2 * alloc_len : XPS_MIN_MAP_ALLOC;
+                               new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+                                                      GFP_KERNEL,
+                                                      cpu_to_node(cpu));
+                               if (!new_map)
+                                       goto error;
+                               new_map->alloc_len = alloc_len;
+                               for (i = 0; i < map_len; i++)
+                                       new_map->queues[i] = map->queues[i];
+                               new_map->len = map_len;
+                       }
+                       new_map->queues[new_map->len++] = index;
+               } else if (!need_set && pos < map_len) {
+                       /* Need to remove queue from this CPU's map */
+                       if (map_len > 1)
+                               new_map->queues[pos] =
+                                   new_map->queues[--new_map->len];
+                       else
+                               new_map = NULL;
+               }
+               RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+       }
+
+       /* Cleanup old maps */
+       for_each_possible_cpu(cpu) {
+               map = dev_maps ?
+                       xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+               if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+                       call_rcu(&map->rcu, xps_map_release);
+               if (new_dev_maps->cpu_map[cpu])
+                       nonempty = 1;
+       }
+
+       if (nonempty)
+               rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+       else {
+               kfree(new_dev_maps);
+               rcu_assign_pointer(dev->xps_maps, NULL);
+       }
+
+       if (dev_maps)
+               call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+
+       mutex_unlock(&xps_map_mutex);
+
+       free_cpumask_var(mask);
+       return len;
+
+error:
+       mutex_unlock(&xps_map_mutex);
+
+       if (new_dev_maps)
+               for_each_possible_cpu(i)
+                       kfree(rcu_dereference_protected(
+                               new_dev_maps->cpu_map[i],
+                               1));
+       kfree(new_dev_maps);
+       free_cpumask_var(mask);
+       return -ENOMEM;
+}
+
+static struct netdev_queue_attribute xps_cpus_attribute =
+    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+
+static struct attribute *netdev_queue_default_attrs[] = {
+       &xps_cpus_attribute.attr,
+       NULL
+};
+
+static void netdev_queue_release(struct kobject *kobj)
+{
+       struct netdev_queue *queue = to_netdev_queue(kobj);
+       struct net_device *dev = queue->dev;
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       unsigned long index;
+       int i, pos, nonempty = 0;
+
+       index = get_netdev_queue_index(queue);
+
+       mutex_lock(&xps_map_mutex);
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       if (dev_maps) {
+               for_each_possible_cpu(i) {
+                       map = xmap_dereference(dev_maps->cpu_map[i]);
+                       if (!map)
+                               continue;
+
+                       for (pos = 0; pos < map->len; pos++)
+                               if (map->queues[pos] == index)
+                                       break;
+
+                       if (pos < map->len) {
+                               if (map->len > 1)
+                                       map->queues[pos] =
+                                           map->queues[--map->len];
+                               else {
+                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
+                                           NULL);
+                                       call_rcu(&map->rcu, xps_map_release);
+                                       map = NULL;
+                               }
+                       }
+                       if (map)
+                               nonempty = 1;
+               }
+
+               if (!nonempty) {
+                       RCU_INIT_POINTER(dev->xps_maps, NULL);
+                       call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+               }
+       }
+
+       mutex_unlock(&xps_map_mutex);
+
+       memset(kobj, 0, sizeof(*kobj));
+       dev_put(queue->dev);
+}
+
+static struct kobj_type netdev_queue_ktype = {
+       .sysfs_ops = &netdev_queue_sysfs_ops,
+       .release = netdev_queue_release,
+       .default_attrs = netdev_queue_default_attrs,
+};
+
+static int netdev_queue_add_kobject(struct net_device *net, int index)
+{
+       struct netdev_queue *queue = net->_tx + index;
+       struct kobject *kobj = &queue->kobj;
+       int error = 0;
+
+       kobj->kset = net->queues_kset;
+       error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+           "tx-%u", index);
+       if (error) {
+               kobject_put(kobj);
+               return error;
+       }
+
+       kobject_uevent(kobj, KOBJ_ADD);
+       dev_hold(queue->dev);
+
+       return error;
+}
+#endif /* CONFIG_XPS */
+
+int
+netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+{
+#ifdef CONFIG_XPS
+       int i;
+       int error = 0;
+
+       for (i = old_num; i < new_num; i++) {
+               error = netdev_queue_add_kobject(net, i);
+               if (error) {
+                       new_num = old_num;
+                       break;
+               }
+       }
+
+       while (--i >= new_num)
+               kobject_put(&net->_tx[i].kobj);
+
+       return error;
+#else
+       return 0;
+#endif
+}
+
+static int register_queue_kobjects(struct net_device *net)
+{
+       int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
+
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
                return -ENOMEM;
-       return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+#endif
+
+#ifdef CONFIG_RPS
+       real_rx = net->real_num_rx_queues;
+#endif
+       real_tx = net->real_num_tx_queues;
+
+       error = net_rx_queue_update_kobjects(net, 0, real_rx);
+       if (error)
+               goto error;
+       rxq = real_rx;
+
+       error = netdev_queue_update_kobjects(net, 0, real_tx);
+       if (error)
+               goto error;
+       txq = real_tx;
+
+       return 0;
+
+error:
+       netdev_queue_update_kobjects(net, txq, 0);
+       net_rx_queue_update_kobjects(net, rxq, 0);
+       return error;
 }
 
-static void rx_queue_remove_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *net)
 {
-       net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
+       int real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_RPS
+       real_rx = net->real_num_rx_queues;
+#endif
+       real_tx = net->real_num_tx_queues;
+
+       net_rx_queue_update_kobjects(net, real_rx, 0);
+       netdev_queue_update_kobjects(net, real_tx, 0);
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
        kset_unregister(net->queues_kset);
+#endif
 }
-#endif /* CONFIG_RPS */
 
 static const void *net_current_ns(void)
 {
@@ -885,9 +1276,7 @@ void netdev_unregister_kobject(struct net_device * net)
 
        kobject_get(&dev->kobj);
 
-#ifdef CONFIG_RPS
-       rx_queue_remove_kobjects(net);
-#endif
+       remove_queue_kobjects(net);
 
        device_del(dev);
 }
@@ -926,13 +1315,11 @@ int netdev_register_kobject(struct net_device *net)
        if (error)
                return error;
 
-#ifdef CONFIG_RPS
-       error = rx_queue_register_kobjects(net);
+       error = register_queue_kobjects(net);
        if (error) {
                device_del(dev);
                return error;
        }
-#endif
 
        return error;
 }
index 778e157..bd7751e 100644 (file)
@@ -4,8 +4,8 @@
 int netdev_kobject_init(void);
 int netdev_register_kobject(struct net_device *);
 void netdev_unregister_kobject(struct net_device *);
-#ifdef CONFIG_RPS
 int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
-#endif
+int netdev_queue_update_kobjects(struct net_device *net,
+                                int old_num, int new_num);
 
 #endif
index 4e98ffa..ee38acb 100644 (file)
@@ -76,8 +76,7 @@ static void queue_process(struct work_struct *work)
 
                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) ||
-                   netif_tx_queue_frozen(txq) ||
+               if (netif_tx_queue_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
index 2e57830..2953b2a 100644 (file)
@@ -3527,7 +3527,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
        __netif_tx_lock_bh(txq);
 
-       if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+       if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
                ret = NETDEV_TX_BUSY;
                pkt_dev->last_ok = 0;
                goto unlock;
index bf69e58..750db57 100644 (file)
@@ -1107,6 +1107,28 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
                        return -EINVAL;
        }
 
+       if (tb[IFLA_AF_SPEC]) {
+               struct nlattr *af;
+               int rem, err;
+
+               nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+                       const struct rtnl_af_ops *af_ops;
+
+                       if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+                               return -EAFNOSUPPORT;
+
+                       if (!af_ops->set_link_af)
+                               return -EOPNOTSUPP;
+
+                       if (af_ops->validate_link_af) {
+                               err = af_ops->validate_link_af(dev,
+                                                       tb[IFLA_AF_SPEC]);
+                               if (err < 0)
+                                       return err;
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -1356,12 +1378,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        const struct rtnl_af_ops *af_ops;
 
                        if (!(af_ops = rtnl_af_lookup(nla_type(af))))
-                               continue;
-
-                       if (!af_ops->parse_link_af)
-                               continue;
+                               BUG();
 
-                       err = af_ops->parse_link_af(dev, af);
+                       err = af_ops->set_link_af(dev, af);
                        if (err < 0)
                                goto errout;
 
index 413cab8..bbe4544 100644 (file)
@@ -79,10 +79,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                        return -ENOMEM;
                *fplp = fpl;
                fpl->count = 0;
+               fpl->max = SCM_MAX_FD;
        }
        fpp = &fpl->fp[fpl->count];
 
-       if (fpl->count + num > SCM_MAX_FD)
+       if (fpl->count + num > fpl->max)
                return -EINVAL;
 
        /*
@@ -331,11 +332,12 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
        if (!fpl)
                return NULL;
 
-       new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
+       new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
+                         GFP_KERNEL);
        if (new_fpl) {
-               for (i=fpl->count-1; i>=0; i--)
+               for (i = 0; i < fpl->count; i++)
                        get_file(fpl->fp[i]);
-               memcpy(new_fpl, fpl, sizeof(*fpl));
+               new_fpl->max = new_fpl->count;
        }
        return new_fpl;
 }
index 71afc26..d9f71ba 100644 (file)
@@ -1289,14 +1289,14 @@ static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
        [IFLA_INET_CONF]        = { .type = NLA_NESTED },
 };
 
-static int inet_parse_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_validate_link_af(const struct net_device *dev,
+                                const struct nlattr *nla)
 {
-       struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
        int err, rem;
 
-       if (!in_dev)
-               return -EOPNOTSUPP;
+       if (dev && !__in_dev_get_rcu(dev))
+               return -EAFNOSUPPORT;
 
        err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
        if (err < 0)
@@ -1314,6 +1314,21 @@ static int inet_parse_link_af(struct net_device *dev, const struct nlattr *nla)
                }
        }
 
+       return 0;
+}
+
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct nlattr *a, *tb[IFLA_INET_MAX+1];
+       int rem;
+
+       if (!in_dev)
+               return -EAFNOSUPPORT;
+
+       if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
+               BUG();
+
        if (tb[IFLA_INET_CONF]) {
                nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
                        ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
@@ -1689,7 +1704,8 @@ static struct rtnl_af_ops inet_af_ops = {
        .family           = AF_INET,
        .fill_link_af     = inet_fill_link_af,
        .get_link_af_size = inet_get_link_af_size,
-       .parse_link_af    = inet_parse_link_af,
+       .validate_link_af = inet_validate_link_af,
+       .set_link_af      = inet_set_link_af,
 };
 
 void __init devinet_init(void)
index 3a6e1ec..2b09775 100644 (file)
@@ -1191,13 +1191,13 @@ static int __init ic_dynamic(void)
                    (ic_proto_enabled & IC_USE_DHCP) &&
                    ic_dhcp_msgtype != DHCPACK) {
                        ic_got_reply = 0;
-                       printk(",");
+                       printk(KERN_CONT ",");
                        continue;
                }
 #endif /* IPCONFIG_DHCP */
 
                if (ic_got_reply) {
-                       printk(" OK\n");
+                       printk(KERN_CONT " OK\n");
                        break;
                }
 
@@ -1205,7 +1205,7 @@ static int __init ic_dynamic(void)
                        continue;
 
                if (! --retries) {
-                       printk(" timed out!\n");
+                       printk(KERN_CONT " timed out!\n");
                        break;
                }
 
@@ -1215,7 +1215,7 @@ static int __init ic_dynamic(void)
                if (timeout > CONF_TIMEOUT_MAX)
                        timeout = CONF_TIMEOUT_MAX;
 
-               printk(".");
+               printk(KERN_CONT ".");
        }
 
 #ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@ static int __init ic_dynamic(void)
                ((ic_got_reply & IC_RARP) ? "RARP"
                 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
                &ic_servaddr);
-       printk("my address is %pI4\n", &ic_myaddr);
+       printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
 
        return 0;
 }
@@ -1468,19 +1468,19 @@ static int __init ip_auto_config(void)
        /*
         * Clue in the operator.
         */
-       printk("IP-Config: Complete:");
-       printk("\n     device=%s", ic_dev->name);
-       printk(", addr=%pI4", &ic_myaddr);
-       printk(", mask=%pI4", &ic_netmask);
-       printk(", gw=%pI4", &ic_gateway);
-       printk(",\n     host=%s, domain=%s, nis-domain=%s",
+       printk("IP-Config: Complete:\n");
+       printk("     device=%s", ic_dev->name);
+       printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
+       printk(KERN_CONT ", mask=%pI4", &ic_netmask);
+       printk(KERN_CONT ", gw=%pI4", &ic_gateway);
+       printk(KERN_CONT ",\n     host=%s, domain=%s, nis-domain=%s",
               utsname()->nodename, ic_domain, utsname()->domainname);
-       printk(",\n     bootserver=%pI4", &ic_servaddr);
-       printk(", rootserver=%pI4", &root_server_addr);
-       printk(", rootpath=%s", root_server_path);
+       printk(KERN_CONT ",\n     bootserver=%pI4", &ic_servaddr);
+       printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
+       printk(KERN_CONT ", rootpath=%s", root_server_path);
        if (ic_dev_mtu)
-               printk(", mtu=%d", ic_dev_mtu);
-       printk("\n");
+               printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
+       printk(KERN_CONT "\n");
 #endif /* !SILENT */
 
        return 0;
index bb8f547..5f29b2e 100644 (file)
@@ -822,8 +822,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 
-       if (tcp_packets_in_flight(tp) == 0)
+       if (tcp_packets_in_flight(tp) == 0) {
                tcp_ca_event(sk, CA_EVENT_TX_START);
+               skb->ooo_okay = 1;
+       } else
+               skb->ooo_okay = 0;
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
index 4cf7605..1023ad0 100644 (file)
@@ -3956,11 +3956,6 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
        return 0;
 }
 
-static int inet6_parse_link_af(struct net_device *dev, const struct nlattr *nla)
-{
-       return -EOPNOTSUPP;
-}
-
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
                             u32 pid, u32 seq, int event, unsigned int flags)
 {
@@ -4670,7 +4665,6 @@ static struct rtnl_af_ops inet6_ops = {
        .family           = AF_INET6,
        .fill_link_af     = inet6_fill_link_af,
        .get_link_af_size = inet6_get_link_af_size,
-       .parse_link_af    = inet6_parse_link_af,
 };
 
 /*
index 8a16280..861d252 100644 (file)
@@ -60,18 +60,16 @@ EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
                           const u32 rnd, const u16 synq_hsize)
 {
-       u32 a = (__force u32)raddr->s6_addr32[0];
-       u32 b = (__force u32)raddr->s6_addr32[1];
-       u32 c = (__force u32)raddr->s6_addr32[2];
-
-       a += JHASH_GOLDEN_RATIO;
-       b += JHASH_GOLDEN_RATIO;
-       c += rnd;
-       __jhash_mix(a, b, c);
-
-       a += (__force u32)raddr->s6_addr32[3];
-       b += (__force u32)rport;
-       __jhash_mix(a, b, c);
+       u32 c;
+
+       c = jhash_3words((__force u32)raddr->s6_addr32[0],
+                        (__force u32)raddr->s6_addr32[1],
+                        (__force u32)raddr->s6_addr32[2],
+                        rnd);
+
+       c = jhash_2words((__force u32)raddr->s6_addr32[3],
+                        (__force u32)rport,
+                        c);
 
        return c & (synq_hsize - 1);
 }
index 2a59610..b115555 100644 (file)
@@ -58,8 +58,6 @@ MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
 
-#define IPV6_TLV_TEL_DST_SIZE 8
-
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
 #else
index 9c50745..49f986d 100644 (file)
@@ -82,7 +82,7 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 /* Big mc list lock for all the sockets */
-static DEFINE_RWLOCK(ipv6_sk_mc_lock);
+static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
 
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
@@ -123,6 +123,11 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
  *     socket join on multicast group
  */
 
+#define for_each_pmc_rcu(np, pmc)                              \
+       for (pmc = rcu_dereference(np->ipv6_mc_list);           \
+            pmc != NULL;                                       \
+            pmc = rcu_dereference(pmc->next))
+
 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
        struct net_device *dev = NULL;
@@ -134,15 +139,15 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        if (!ipv6_addr_is_multicast(addr))
                return -EINVAL;
 
-       read_lock_bh(&ipv6_sk_mc_lock);
-       for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(np, mc_lst) {
                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
                    ipv6_addr_equal(&mc_lst->addr, addr)) {
-                       read_unlock_bh(&ipv6_sk_mc_lock);
+                       rcu_read_unlock();
                        return -EADDRINUSE;
                }
        }
-       read_unlock_bh(&ipv6_sk_mc_lock);
+       rcu_read_unlock();
 
        mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 
@@ -186,33 +191,41 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
                return err;
        }
 
-       write_lock_bh(&ipv6_sk_mc_lock);
+       spin_lock(&ipv6_sk_mc_lock);
        mc_lst->next = np->ipv6_mc_list;
-       np->ipv6_mc_list = mc_lst;
-       write_unlock_bh(&ipv6_sk_mc_lock);
+       rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
+       spin_unlock(&ipv6_sk_mc_lock);
 
        rcu_read_unlock();
 
        return 0;
 }
 
+static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ipv6_mc_socklist, rcu));
+}
 /*
  *     socket leave on multicast group
  */
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct ipv6_mc_socklist *mc_lst, **lnk;
+       struct ipv6_mc_socklist *mc_lst;
+       struct ipv6_mc_socklist __rcu **lnk;
        struct net *net = sock_net(sk);
 
-       write_lock_bh(&ipv6_sk_mc_lock);
-       for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
+       spin_lock(&ipv6_sk_mc_lock);
+       for (lnk = &np->ipv6_mc_list;
+            (mc_lst = rcu_dereference_protected(*lnk,
+                       lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
+             lnk = &mc_lst->next) {
                if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
                    ipv6_addr_equal(&mc_lst->addr, addr)) {
                        struct net_device *dev;
 
                        *lnk = mc_lst->next;
-                       write_unlock_bh(&ipv6_sk_mc_lock);
+                       spin_unlock(&ipv6_sk_mc_lock);
 
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -225,11 +238,12 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
                        } else
                                (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                        rcu_read_unlock();
-                       sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+                       atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+                       call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
                        return 0;
                }
        }
-       write_unlock_bh(&ipv6_sk_mc_lock);
+       spin_unlock(&ipv6_sk_mc_lock);
 
        return -EADDRNOTAVAIL;
 }
@@ -272,12 +286,13 @@ void ipv6_sock_mc_close(struct sock *sk)
        struct ipv6_mc_socklist *mc_lst;
        struct net *net = sock_net(sk);
 
-       write_lock_bh(&ipv6_sk_mc_lock);
-       while ((mc_lst = np->ipv6_mc_list) != NULL) {
+       spin_lock(&ipv6_sk_mc_lock);
+       while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
+                               lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
                struct net_device *dev;
 
                np->ipv6_mc_list = mc_lst->next;
-               write_unlock_bh(&ipv6_sk_mc_lock);
+               spin_unlock(&ipv6_sk_mc_lock);
 
                rcu_read_lock();
                dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -290,11 +305,13 @@ void ipv6_sock_mc_close(struct sock *sk)
                } else
                        (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                rcu_read_unlock();
-               sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
-               write_lock_bh(&ipv6_sk_mc_lock);
+               atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+               call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
+
+               spin_lock(&ipv6_sk_mc_lock);
        }
-       write_unlock_bh(&ipv6_sk_mc_lock);
+       spin_unlock(&ipv6_sk_mc_lock);
 }
 
 int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -328,8 +345,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 
        err = -EADDRNOTAVAIL;
 
-       read_lock(&ipv6_sk_mc_lock);
-       for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rcu(inet6, pmc) {
                if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
                        continue;
                if (ipv6_addr_equal(&pmc->addr, group))
@@ -428,7 +444,6 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
 done:
        if (pmclocked)
                write_unlock(&pmc->sflock);
-       read_unlock(&ipv6_sk_mc_lock);
        read_unlock_bh(&idev->lock);
        rcu_read_unlock();
        if (leavegroup)
@@ -466,14 +481,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
        dev = idev->dev;
 
        err = 0;
-       read_lock(&ipv6_sk_mc_lock);
 
        if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
                leavegroup = 1;
                goto done;
        }
 
-       for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rcu(inet6, pmc) {
                if (pmc->ifindex != gsf->gf_interface)
                        continue;
                if (ipv6_addr_equal(&pmc->addr, group))
@@ -521,7 +535,6 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
        write_unlock(&pmc->sflock);
        err = 0;
 done:
-       read_unlock(&ipv6_sk_mc_lock);
        read_unlock_bh(&idev->lock);
        rcu_read_unlock();
        if (leavegroup)
@@ -562,7 +575,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
         * so reading the list is safe.
         */
 
-       for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rcu(inet6, pmc) {
                if (pmc->ifindex != gsf->gf_interface)
                        continue;
                if (ipv6_addr_equal(group, &pmc->addr))
@@ -612,13 +625,13 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
        struct ip6_sf_socklist *psl;
        int rv = 1;
 
-       read_lock(&ipv6_sk_mc_lock);
-       for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(np, mc) {
                if (ipv6_addr_equal(&mc->addr, mc_addr))
                        break;
        }
        if (!mc) {
-               read_unlock(&ipv6_sk_mc_lock);
+               rcu_read_unlock();
                return 1;
        }
        read_lock(&mc->sflock);
@@ -638,7 +651,7 @@ int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
                        rv = 0;
        }
        read_unlock(&mc->sflock);
-       read_unlock(&ipv6_sk_mc_lock);
+       rcu_read_unlock();
 
        return rv;
 }
index 0f27664..07beeb0 100644 (file)
@@ -104,26 +104,22 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                             const struct in6_addr *daddr, u32 rnd)
 {
-       u32 a, b, c;
-
-       a = (__force u32)saddr->s6_addr32[0];
-       b = (__force u32)saddr->s6_addr32[1];
-       c = (__force u32)saddr->s6_addr32[2];
-
-       a += JHASH_GOLDEN_RATIO;
-       b += JHASH_GOLDEN_RATIO;
-       c += rnd;
-       __jhash_mix(a, b, c);
-
-       a += (__force u32)saddr->s6_addr32[3];
-       b += (__force u32)daddr->s6_addr32[0];
-       c += (__force u32)daddr->s6_addr32[1];
-       __jhash_mix(a, b, c);
-
-       a += (__force u32)daddr->s6_addr32[2];
-       b += (__force u32)daddr->s6_addr32[3];
-       c += (__force u32)id;
-       __jhash_mix(a, b, c);
+       u32 c;
+
+       c = jhash_3words((__force u32)saddr->s6_addr32[0],
+                        (__force u32)saddr->s6_addr32[1],
+                        (__force u32)saddr->s6_addr32[2],
+                        rnd);
+
+       c = jhash_3words((__force u32)saddr->s6_addr32[3],
+                        (__force u32)daddr->s6_addr32[0],
+                        (__force u32)daddr->s6_addr32[1],
+                        c);
+
+       c =  jhash_3words((__force u32)daddr->s6_addr32[2],
+                         (__force u32)daddr->s6_addr32[3],
+                         (__force u32)id,
+                         c);
 
        return c & (INETFRAGS_HASHSZ - 1);
 }
index c346ccf..a0c4ad1 100644 (file)
@@ -2453,8 +2453,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 
 #ifdef CONFIG_PROC_FS
 
-#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
-
 struct rt6_proc_arg
 {
        char *buffer;
index 5dbb3cd..7f0bd89 100644 (file)
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-               if (!netif_tx_queue_stopped(txq) &&
-                   !netif_tx_queue_frozen(txq)) {
+               if (!netif_tx_queue_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        spin_unlock(root_lock);
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+       if (!netif_tx_queue_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                ret = dev_requeue_skb(skb, q);
        }
 
-       if (ret && (netif_tx_queue_stopped(txq) ||
-                   netif_tx_queue_frozen(txq)))
+       if (ret && netif_tx_queue_frozen_or_stopped(txq))
                ret = 0;
 
        return ret;
index 401af95..106479a 100644 (file)
@@ -309,8 +309,7 @@ restart:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
 
-                               if (!netif_tx_queue_stopped(slave_txq) &&
-                                   !netif_tx_queue_frozen(slave_txq) &&
+                               if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                        txq_trans_update(slave_txq);
                                        __netif_tx_unlock(slave_txq);
index 2351ace..ad96ee9 100644 (file)
@@ -1415,47 +1415,43 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        rc = x25_route_ioctl(cmd, argp);
                        break;
                case SIOCX25GSUBSCRIP:
-                       lock_kernel();
                        rc = x25_subscr_ioctl(cmd, argp);
-                       unlock_kernel();
                        break;
                case SIOCX25SSUBSCRIP:
                        rc = -EPERM;
                        if (!capable(CAP_NET_ADMIN))
                                break;
-                       lock_kernel();
                        rc = x25_subscr_ioctl(cmd, argp);
-                       unlock_kernel();
                        break;
                case SIOCX25GFACILITIES: {
-                       struct x25_facilities fac = x25->facilities;
-                       lock_kernel();
-                       rc = copy_to_user(argp, &fac,
-                                         sizeof(fac)) ? -EFAULT : 0;
-                       unlock_kernel();
+                       lock_sock(sk);
+                       rc = copy_to_user(argp, &x25->facilities,
+                                               sizeof(x25->facilities))
+                                               ? -EFAULT : 0;
+                       release_sock(sk);
                        break;
                }
 
                case SIOCX25SFACILITIES: {
                        struct x25_facilities facilities;
                        rc = -EFAULT;
-                       lock_kernel();
                        if (copy_from_user(&facilities, argp,
                                           sizeof(facilities)))
                                break;
                        rc = -EINVAL;
+                       lock_sock(sk);
                        if (sk->sk_state != TCP_LISTEN &&
                            sk->sk_state != TCP_CLOSE)
-                               break;
+                               goto out_fac_release;
                        if (facilities.pacsize_in < X25_PS16 ||
                            facilities.pacsize_in > X25_PS4096)
-                               break;
+                               goto out_fac_release;
                        if (facilities.pacsize_out < X25_PS16 ||
                            facilities.pacsize_out > X25_PS4096)
-                               break;
+                               goto out_fac_release;
                        if (facilities.winsize_in < 1 ||
                            facilities.winsize_in > 127)
-                               break;
+                               goto out_fac_release;
                        if (facilities.throughput) {
                                int out = facilities.throughput & 0xf0;
                                int in  = facilities.throughput & 0x0f;
@@ -1463,27 +1459,28 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                                        facilities.throughput |=
                                                X25_DEFAULT_THROUGHPUT << 4;
                                else if (out < 0x30 || out > 0xD0)
-                                       break;
+                                       goto out_fac_release;
                                if (!in)
                                        facilities.throughput |=
                                                X25_DEFAULT_THROUGHPUT;
                                else if (in < 0x03 || in > 0x0D)
-                                       break;
+                                       goto out_fac_release;
                        }
                        if (facilities.reverse &&
                                (facilities.reverse & 0x81) != 0x81)
-                               break;
+                               goto out_fac_release;
                        x25->facilities = facilities;
                        rc = 0;
-                       unlock_kernel();
+out_fac_release:
+                       release_sock(sk);
                        break;
                }
 
                case SIOCX25GDTEFACILITIES: {
-                       lock_kernel();
+                       lock_sock(sk);
                        rc = copy_to_user(argp, &x25->dte_facilities,
                                                sizeof(x25->dte_facilities));
-                       unlock_kernel();
+                       release_sock(sk);
                        if (rc)
                                rc = -EFAULT;
                        break;
@@ -1492,33 +1489,34 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCX25SDTEFACILITIES: {
                        struct x25_dte_facilities dtefacs;
                        rc = -EFAULT;
-                       lock_kernel();
                        if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
                                break;
                        rc = -EINVAL;
+                       lock_sock(sk);
                        if (sk->sk_state != TCP_LISTEN &&
                                        sk->sk_state != TCP_CLOSE)
-                               break;
+                               goto out_dtefac_release;
                        if (dtefacs.calling_len > X25_MAX_AE_LEN)
-                               break;
+                               goto out_dtefac_release;
                        if (dtefacs.calling_ae == NULL)
-                               break;
+                               goto out_dtefac_release;
                        if (dtefacs.called_len > X25_MAX_AE_LEN)
-                               break;
+                               goto out_dtefac_release;
                        if (dtefacs.called_ae == NULL)
-                               break;
+                               goto out_dtefac_release;
                        x25->dte_facilities = dtefacs;
                        rc = 0;
-                       unlock_kernel();
+out_dtefac_release:
+                       release_sock(sk);
                        break;
                }
 
                case SIOCX25GCALLUSERDATA: {
-                       struct x25_calluserdata cud = x25->calluserdata;
-                       lock_kernel();
-                       rc = copy_to_user(argp, &cud,
-                                         sizeof(cud)) ? -EFAULT : 0;
-                       unlock_kernel();
+                       lock_sock(sk);
+                       rc = copy_to_user(argp, &x25->calluserdata,
+                                       sizeof(x25->calluserdata))
+                                       ? -EFAULT : 0;
+                       release_sock(sk);
                        break;
                }
 
@@ -1526,37 +1524,36 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        struct x25_calluserdata calluserdata;
 
                        rc = -EFAULT;
-                       lock_kernel();
                        if (copy_from_user(&calluserdata, argp,
                                           sizeof(calluserdata)))
                                break;
                        rc = -EINVAL;
                        if (calluserdata.cudlength > X25_MAX_CUD_LEN)
                                break;
+                       lock_sock(sk);
                        x25->calluserdata = calluserdata;
-                       unlock_kernel();
+                       release_sock(sk);
                        rc = 0;
                        break;
                }
 
                case SIOCX25GCAUSEDIAG: {
-                       struct x25_causediag causediag;
-                       lock_kernel();
-                       causediag = x25->causediag;
-                       rc = copy_to_user(argp, &causediag,
-                                         sizeof(causediag)) ? -EFAULT : 0;
-                       unlock_kernel();
+                       lock_sock(sk);
+                       rc = copy_to_user(argp, &x25->causediag,
+                                       sizeof(x25->causediag))
+                                       ? -EFAULT : 0;
+                       release_sock(sk);
                        break;
                }
 
                case SIOCX25SCAUSEDIAG: {
                        struct x25_causediag causediag;
                        rc = -EFAULT;
-                       lock_kernel();
                        if (copy_from_user(&causediag, argp, sizeof(causediag)))
                                break;
+                       lock_sock(sk);
                        x25->causediag = causediag;
-                       unlock_kernel();
+                       release_sock(sk);
                        rc = 0;
                        break;
 
@@ -1565,19 +1562,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCX25SCUDMATCHLEN: {
                        struct x25_subaddr sub_addr;
                        rc = -EINVAL;
-                       lock_kernel();
+                       lock_sock(sk);
                        if(sk->sk_state != TCP_CLOSE)
-                               break;
+                               goto out_cud_release;
                        rc = -EFAULT;
                        if (copy_from_user(&sub_addr, argp,
                                        sizeof(sub_addr)))
-                               break;
+                               goto out_cud_release;
                        rc = -EINVAL;
                        if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
-                               break;
+                               goto out_cud_release;
                        x25->cudmatchlength = sub_addr.cudmatchlength;
-                       unlock_kernel();
                        rc = 0;
+out_cud_release:
+                       release_sock(sk);
                        break;
                }
 
@@ -1646,16 +1644,20 @@ static int compat_x25_subscr_ioctl(unsigned int cmd,
        dev_put(dev);
 
        if (cmd == SIOCX25GSUBSCRIP) {
+               read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
+               read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(x25_subscr32, &x25_subscr,
                                sizeof(*x25_subscr32)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
                if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
                        rc = 0;
+                       write_lock_bh(&x25_neigh_list_lock);
                        nb->extended = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
+                       write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);
@@ -1711,17 +1713,13 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
                rc = x25_route_ioctl(cmd, argp);
                break;
        case SIOCX25GSUBSCRIP:
-               lock_kernel();
                rc = compat_x25_subscr_ioctl(cmd, argp);
-               unlock_kernel();
                break;
        case SIOCX25SSUBSCRIP:
                rc = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
-               lock_kernel();
                rc = compat_x25_subscr_ioctl(cmd, argp);
-               unlock_kernel();
                break;
        case SIOCX25GFACILITIES:
        case SIOCX25SFACILITIES:
index 73e7b95..4c81f6a 100644 (file)
@@ -31,8 +31,8 @@
 #include <linux/init.h>
 #include <net/x25.h>
 
-static LIST_HEAD(x25_neigh_list);
-static DEFINE_RWLOCK(x25_neigh_list_lock);
+LIST_HEAD(x25_neigh_list);
+DEFINE_RWLOCK(x25_neigh_list_lock);
 
 static void x25_t20timer_expiry(unsigned long);
 
@@ -360,16 +360,20 @@ int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
        dev_put(dev);
 
        if (cmd == SIOCX25GSUBSCRIP) {
+               read_lock_bh(&x25_neigh_list_lock);
                x25_subscr.extended          = nb->extended;
                x25_subscr.global_facil_mask = nb->global_facil_mask;
+               read_unlock_bh(&x25_neigh_list_lock);
                rc = copy_to_user(arg, &x25_subscr,
                                  sizeof(x25_subscr)) ? -EFAULT : 0;
        } else {
                rc = -EINVAL;
                if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
                        rc = 0;
+                       write_lock_bh(&x25_neigh_list_lock);
                        nb->extended         = x25_subscr.extended;
                        nb->global_facil_mask = x25_subscr.global_facil_mask;
+                       write_unlock_bh(&x25_neigh_list_lock);
                }
        }
        x25_neigh_put(nb);