Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 17 Jan 2011 22:45:48 +0000 (14:45 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 17 Jan 2011 22:45:48 +0000 (14:45 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA: Update workqueue usage
  RDMA/nes: Fix incorrect SFP+ link status detection on driver init
  RDMA/nes: Fix SFP+ link down detection issue with switch port disable
  RDMA/nes: Generate IB_EVENT_PORT_ERR/PORT_ACTIVE events
  RDMA/nes: Fix bonding on iw_nes
  IB/srp: Test only once whether iu allocation succeeded
  IB/mlx4: Handle protocol field in multicast table
  RDMA: Use vzalloc() to replace vmalloc()+memset(0)
  mlx4_{core, ib, en}: Fix driver when sizeof (phys_addr_t) > sizeof (long)
  IB/mthca: Fix driver when sizeof (phys_addr_t) > sizeof (long)
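
Most of the hunks below are two mechanical conversions: per-driver deferred work moves off the global system workqueue (schedule_work()/flush_scheduled_work()) onto the shared ib_wq workqueue that ib_core now allocates and exports, and vmalloc()+memset(0) pairs collapse into vzalloc(). A minimal driver-side sketch of both patterns, using a hypothetical foo_dev that is not from the tree:

	/*
	 * Illustrative sketch only; "foo" names are made up, the APIs
	 * (ib_wq, queue_work, flush_workqueue, vzalloc) are the ones the
	 * diff below converts drivers to.
	 */
	#include <linux/vmalloc.h>
	#include <linux/workqueue.h>
	#include <rdma/ib_verbs.h>	/* declares the shared ib_wq after this merge */

	struct foo_dev {
		struct work_struct	work;
		void			**qptr_array;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		/* deferred work now runs on the "infiniband" workqueue */
	}

	static int foo_init(struct foo_dev *foo, int nents)
	{
		/* was: vmalloc() followed by memset(..., 0, ...) */
		foo->qptr_array = vzalloc(nents * sizeof(void *));
		if (!foo->qptr_array)
			return -ENOMEM;

		INIT_WORK(&foo->work, foo_work_fn);
		/* was: schedule_work(&foo->work) on the system workqueue */
		queue_work(ib_wq, &foo->work);
		return 0;
	}

	static void foo_teardown(struct foo_dev *foo)
	{
		/* was: flush_scheduled_work(); flush only the IB workqueue */
		flush_workqueue(ib_wq);
		vfree(foo->qptr_array);
	}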

38 files changed:
drivers/infiniband/core/cache.c
drivers/infiniband/core/device.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/amso1100/c2_rnic.c
drivers/infiniband/hw/ehca/ipz_pt_fn.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_init_chip.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_mr.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/net/mlx4/catas.c
drivers/net/mlx4/en_main.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mcg.c
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 6888356..f9ba7d7 100644
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
-                       schedule_work(&work->work);
+                       queue_work(ib_wq, &work->work);
                }
        }
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
        int p;
 
        ib_unregister_event_handler(&device->cache.event_handler);
-       flush_scheduled_work();
+       flush_workqueue(ib_wq);
 
        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effa..f793bf2 100644
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@ struct ib_client_data {
        void *            data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
        int ret;
 
+       ib_wq = alloc_workqueue("infiniband", 0, 0);
+       if (!ib_wq)
+               return -ENOMEM;
+
        ret = ib_sysfs_setup();
        if (ret)
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                ib_sysfs_cleanup();
+               destroy_workqueue(ib_wq);
        }
 
        return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
        ib_cache_cleanup();
        ib_sysfs_cleanup();
        /* Make sure that any pending umem accounting work is done. */
-       flush_scheduled_work();
+       destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a6603..e38be1b 100644
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                port->sm_ah = NULL;
                spin_unlock_irqrestore(&port->ah_lock, flags);
 
-               schedule_work(&sa_dev->port[event->element.port_num -
+               queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
        }
 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186..b645e55 100644
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
                        umem->mm   = mm;
                        umem->diff = diff;
 
-                       schedule_work(&umem->work);
+                       queue_work(ib_wq, &umem->work);
                        return;
                }
        } else
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 85cfae4..8c81992 100644
@@ -459,13 +459,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
             IB_DEVICE_MEM_WINDOW);
 
        /* Allocate the qptr_array */
-       c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+       c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
        if (!c2dev->qptr_array) {
                return -ENOMEM;
        }
 
-       /* Inialize the qptr_array */
-       memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+       /* Initialize the qptr_array */
        c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
        c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
        c2dev->qptr_array[2] = (void *) &c2dev->aeq;
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1596e30..1898d6e 100644
@@ -222,15 +222,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
        queue->small_page = NULL;
 
        /* allocate queue page pointers */
-       queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
-               queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+               queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
                if (!queue->queue_pages) {
                        ehca_gen_err("Couldn't allocate queue page list");
                        return 0;
                }
        }
-       memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
        /* allocate actual queue pages */
        if (is_small) {
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index b33f045..47db4bf 100644
@@ -199,12 +199,11 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
                goto bail;
        }
 
-       dd = vmalloc(sizeof(*dd));
+       dd = vzalloc(sizeof(*dd));
        if (!dd) {
                dd = ERR_PTR(-ENOMEM);
                goto bail;
        }
-       memset(dd, 0, sizeof(*dd));
        dd->ipath_unit = -1;
 
        spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -756,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
         */
        ipath_shutdown_device(dd);
 
-       flush_scheduled_work();
+       flush_workqueue(ib_wq);
 
        if (dd->verbs_dev)
                ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 9292a15..6d4b29c 100644
@@ -1530,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd,
        }
 
        num_subports = uinfo->spu_subport_cnt;
-       pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
+       pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
        if (!pd->subport_uregbase) {
                ret = -ENOMEM;
                goto bail;
@@ -1538,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd,
        /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
        size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
                     sizeof(u32), PAGE_SIZE) * num_subports;
-       pd->subport_rcvhdr_base = vmalloc(size);
+       pd->subport_rcvhdr_base = vzalloc(size);
        if (!pd->subport_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }
 
-       pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
+       pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
                                        pd->port_rcvegrbuf_size *
                                        num_subports);
        if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@ static int init_subports(struct ipath_devdata *dd,
        pd->port_subport_id = uinfo->spu_subport_id;
        pd->active_slaves = 1;
        set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-       memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
-       memset(pd->subport_rcvhdr_base, 0, size);
-       memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
-                                        pd->port_rcvegrbuf_size *
-                                        num_subports);
        goto bail;
 
 bail_rhdr:
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 7769382..fef0f42 100644
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
        struct page **pages;
        dma_addr_t *addrs;
 
-       pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+       pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
                        sizeof(struct page *));
        if (!pages) {
                ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,9 +461,6 @@ static void init_shadow_tids(struct ipath_devdata *dd)
                return;
        }
 
-       memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-              sizeof(struct page *));
-
        dd->ipath_pageshadow = pages;
        dd->ipath_physshadow = addrs;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73..bab9f74 100644
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
        work->mm = mm;
        work->num_pages = num_pages;
 
-       schedule_work(&work->work);
+       queue_work(ib_wq, &work->work);
        return;
 
 bail_mm:
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4c85224..c7a6213 100644
@@ -623,8 +623,9 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 
-       err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
-                                   MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+       err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+                                   !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+                                   MLX4_PROTOCOL_IB);
        if (err)
                return err;
 
@@ -635,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        return 0;
 
 err_add:
-       mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+       mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
        return err;
 }
 
@@ -665,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_ib_gid_entry *ge;
 
        err = mlx4_multicast_detach(mdev->dev,
-                                   &mqp->mqp, gid->raw);
+                                   &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
        if (err)
                return err;
 
@@ -1005,7 +1006,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
                goto err_pd;
 
-       ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+       ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+                                PAGE_SIZE);
        if (!ibdev->uar_map)
                goto err_uar;
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 0aa0110..e4a08c2 100644
@@ -146,7 +146,7 @@ static void poll_catas(unsigned long dev_ptr)
 
 void mthca_start_catas_poll(struct mthca_dev *dev)
 {
-       unsigned long addr;
+       phys_addr_t addr;
 
        init_timer(&dev->catas_err.timer);
        dev->catas_err.map  = NULL;
@@ -158,7 +158,8 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
        dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
        if (!dev->catas_err.map) {
                mthca_warn(dev, "couldn't map catastrophic error region "
-                          "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
+                          "at 0x%llx/0x%x\n", (unsigned long long) addr,
+                          dev->catas_err.size * 4);
                return;
        }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f4ceecd..7bfa2a1 100644
@@ -713,7 +713,7 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
 
 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
 {
-       unsigned long addr;
+       phys_addr_t addr;
        u16 max_off = 0;
        int i;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8e8c728..76785c6 100644
@@ -653,7 +653,7 @@ static int mthca_map_reg(struct mthca_dev *dev,
                         unsigned long offset, unsigned long size,
                         void __iomem **map)
 {
-       unsigned long base = pci_resource_start(dev->pdev, 0);
+       phys_addr_t base = pci_resource_start(dev->pdev, 0);
 
        *map = ioremap(base + offset, size);
        if (!*map)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5eee666..8a40cd5 100644
@@ -790,7 +790,7 @@ static int mthca_setup_hca(struct mthca_dev *dev)
                goto err_uar_table_free;
        }
 
-       dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+       dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!dev->kar) {
                mthca_err(dev, "Couldn't map kernel access region, "
                          "aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 065b208..44045c8 100644
@@ -853,7 +853,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 
 int mthca_init_mr_table(struct mthca_dev *dev)
 {
-       unsigned long addr;
+       phys_addr_t addr;
        int mpts, mtts, err, i;
 
        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 0c9f0aa..3b4ec32 100644
@@ -144,6 +144,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
        struct nes_device *nesdev;
        struct net_device *netdev;
        struct nes_vnic *nesvnic;
+       unsigned int is_bonded;
 
        nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
                  &ifa->ifa_address, &ifa->ifa_mask);
@@ -152,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
                                nesdev, nesdev->netdev[0]->name);
                netdev = nesdev->netdev[0];
                nesvnic = netdev_priv(netdev);
-               if (netdev == event_netdev) {
+               is_bonded = (netdev->master == event_netdev);
+               if ((netdev == event_netdev) || is_bonded) {
                        if (nesvnic->rdma_enabled == 0) {
                                nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
                                                " RDMA is not enabled.\n",
@@ -169,7 +171,10 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
                                        nes_manage_arp_cache(netdev, netdev->dev_addr,
                                                        ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
                                        nesvnic->local_ipaddr = 0;
-                                       return NOTIFY_OK;
+                                       if (is_bonded)
+                                               continue;
+                                       else
+                                               return NOTIFY_OK;
                                        break;
                                case NETDEV_UP:
                                        nes_debug(NES_DBG_NETDEV, "event:UP\n");
@@ -178,15 +183,24 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
                                                nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
                                                return NOTIFY_OK;
                                        }
+                                       /* fall through */
+                               case NETDEV_CHANGEADDR:
                                        /* Add the address to the IP table */
-                                       nesvnic->local_ipaddr = ifa->ifa_address;
+                                       if (netdev->master)
+                                               nesvnic->local_ipaddr =
+                                                       ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+                                       else
+                                               nesvnic->local_ipaddr = ifa->ifa_address;
 
                                        nes_write_indexed(nesdev,
                                                        NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
-                                                       ntohl(ifa->ifa_address));
+                                                       ntohl(nesvnic->local_ipaddr));
                                        nes_manage_arp_cache(netdev, netdev->dev_addr,
                                                        ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
-                                       return NOTIFY_OK;
+                                       if (is_bonded)
+                                               continue;
+                                       else
+                                               return NOTIFY_OK;
                                        break;
                                default:
                                        break;
@@ -660,6 +674,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
        }
        nes_notifiers_registered++;
 
+       INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
+
        /* Initialize network devices */
        if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
                goto bail7;
@@ -742,6 +758,7 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
        struct nes_device *nesdev = pci_get_drvdata(pcidev);
        struct net_device *netdev;
        int netdev_index = 0;
+       unsigned long flags;
 
                if (nesdev->netdev_count) {
                        netdev = nesdev->netdev[netdev_index];
@@ -768,6 +785,14 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
        free_irq(pcidev->irq, nesdev);
        tasklet_kill(&nesdev->dpc_tasklet);
 
+       spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+       if (nesdev->link_recheck) {
+               spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+               cancel_delayed_work_sync(&nesdev->work);
+       } else {
+               spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+       }
+
        /* Deallocate the Adapter Structure */
        nes_destroy_adapter(nesdev->nesadapter);
 
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index b3d145e..6fe7987 100644
@@ -268,6 +268,9 @@ struct nes_device {
        u8                     napi_isr_ran;
        u8                     disable_rx_flow_control;
        u8                     disable_tx_flow_control;
+
+       struct delayed_work    work;
+       u8                     link_recheck;
 };
 
 
@@ -507,6 +510,7 @@ void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
 void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
 int nes_destroy_cqp(struct nes_device *);
 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+void nes_recheck_link_status(struct work_struct *work);
 
 /* nes_nic.c */
 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 25ad0f9..009ec81 100644
@@ -1107,6 +1107,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        struct flowi fl;
        struct neighbour *neigh;
        int rc = arpindex;
+       struct net_device *netdev;
        struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
 
        memset(&fl, 0, sizeof fl);
@@ -1117,7 +1118,12 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                return rc;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+       if (nesvnic->netdev->master)
+               netdev = nesvnic->netdev->master;
+       else
+               netdev = nesvnic->netdev;
+
+       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1980a46..8b606fd 100644
@@ -2608,6 +2608,13 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                                netif_start_queue(nesvnic->netdev);
                                        nesvnic->linkup = 1;
                                        netif_carrier_on(nesvnic->netdev);
+
+                                       spin_lock(&nesvnic->port_ibevent_lock);
+                                       if (nesdev->iw_status == 0) {
+                                               nesdev->iw_status = 1;
+                                               nes_port_ibevent(nesvnic);
+                                       }
+                                       spin_unlock(&nesvnic->port_ibevent_lock);
                                }
                        }
                } else {
@@ -2633,9 +2640,23 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                                netif_stop_queue(nesvnic->netdev);
                                        nesvnic->linkup = 0;
                                        netif_carrier_off(nesvnic->netdev);
+
+                                       spin_lock(&nesvnic->port_ibevent_lock);
+                                       if (nesdev->iw_status == 1) {
+                                               nesdev->iw_status = 0;
+                                               nes_port_ibevent(nesvnic);
+                                       }
+                                       spin_unlock(&nesvnic->port_ibevent_lock);
                                }
                        }
                }
+               if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
+                       if (nesdev->link_recheck)
+                               cancel_delayed_work(&nesdev->work);
+                       nesdev->link_recheck = 1;
+                       schedule_delayed_work(&nesdev->work,
+                                             NES_LINK_RECHECK_DELAY);
+               }
        }
 
        spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
@@ -2643,6 +2664,80 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
        nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }
 
+void nes_recheck_link_status(struct work_struct *work)
+{
+       unsigned long flags;
+       struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
+       struct nes_vnic *nesvnic;
+       u32 mac_index = nesdev->mac_index;
+       u16 phy_data;
+       u16 temp_phy_data;
+
+       spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+       /* check link status */
+       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+       temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+       nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
+       phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+       phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+
+       nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+               __func__, phy_data,
+               nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+
+       if (phy_data & 0x0004) {
+               nesadapter->mac_link_down[mac_index] = 0;
+               list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+                       if (nesvnic->linkup == 0) {
+                               printk(PFX "The Link is now up for port %s, netdev %p.\n",
+                                               nesvnic->netdev->name, nesvnic->netdev);
+                               if (netif_queue_stopped(nesvnic->netdev))
+                                       netif_start_queue(nesvnic->netdev);
+                               nesvnic->linkup = 1;
+                               netif_carrier_on(nesvnic->netdev);
+
+                               spin_lock(&nesvnic->port_ibevent_lock);
+                               if (nesdev->iw_status == 0) {
+                                       nesdev->iw_status = 1;
+                                       nes_port_ibevent(nesvnic);
+                               }
+                               spin_unlock(&nesvnic->port_ibevent_lock);
+                       }
+               }
+
+       } else {
+               nesadapter->mac_link_down[mac_index] = 1;
+               list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+                       if (nesvnic->linkup == 1) {
+                               printk(PFX "The Link is now down for port %s, netdev %p.\n",
+                                               nesvnic->netdev->name, nesvnic->netdev);
+                               if (!(netif_queue_stopped(nesvnic->netdev)))
+                                       netif_stop_queue(nesvnic->netdev);
+                               nesvnic->linkup = 0;
+                               netif_carrier_off(nesvnic->netdev);
+
+                               spin_lock(&nesvnic->port_ibevent_lock);
+                               if (nesdev->iw_status == 1) {
+                                       nesdev->iw_status = 0;
+                                       nes_port_ibevent(nesvnic);
+                               }
+                               spin_unlock(&nesvnic->port_ibevent_lock);
+                       }
+               }
+       }
+       if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
+               schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+       else
+               nesdev->link_recheck = 0;
+
+       spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
 
 
 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1204c34..d2abe07 100644
@@ -1193,6 +1193,8 @@ struct nes_listener {
 
 struct nes_ib_device;
 
+#define NES_EVENT_DELAY msecs_to_jiffies(100)
+
 struct nes_vnic {
        struct nes_ib_device *nesibdev;
        u64 sq_full;
@@ -1247,6 +1249,10 @@ struct nes_vnic {
        u32 lro_max_aggr;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
+       struct timer_list event_timer;
+       enum ib_event_type delayed_event;
+       enum ib_event_type last_dispatched_event;
+       spinlock_t port_ibevent_lock;
 };
 
 struct nes_ib_device {
@@ -1348,6 +1354,10 @@ struct nes_terminate_hdr {
 #define BAD_FRAME_OFFSET       64
 #define CQE_MAJOR_DRV          0x8000
 
+/* Used for link status recheck after interrupt processing */
+#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
+#define NES_LINK_RECHECK_MAX   60
+
 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5a4c364..2c9c193 100644
@@ -144,6 +144,7 @@ static int nes_netdev_open(struct net_device *netdev)
        u32 nic_active_bit;
        u32 nic_active;
        struct list_head *list_pos, *list_temp;
+       unsigned long flags;
 
        assert(nesdev != NULL);
 
@@ -233,18 +234,36 @@ static int nes_netdev_open(struct net_device *netdev)
                first_nesvnic = nesvnic;
        }
 
-       if (nesvnic->of_device_registered) {
-               nesdev->iw_status = 1;
-               nesdev->nesadapter->send_term_ok = 1;
-               nes_port_ibevent(nesvnic);
-       }
-
        if (first_nesvnic->linkup) {
                /* Enable network packets */
                nesvnic->linkup = 1;
                netif_start_queue(netdev);
                netif_carrier_on(netdev);
        }
+
+       spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+       if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
+               if (nesdev->link_recheck)
+                       cancel_delayed_work(&nesdev->work);
+               nesdev->link_recheck = 1;
+               schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+       }
+       spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+       spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
+       if (nesvnic->of_device_registered) {
+               nesdev->nesadapter->send_term_ok = 1;
+               if (nesvnic->linkup == 1) {
+                       if (nesdev->iw_status == 0) {
+                               nesdev->iw_status = 1;
+                               nes_port_ibevent(nesvnic);
+                       }
+               } else {
+                       nesdev->iw_status = 0;
+               }
+       }
+       spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
        napi_enable(&nesvnic->napi);
        nesvnic->netdev_open = 1;
 
@@ -263,6 +282,7 @@ static int nes_netdev_stop(struct net_device *netdev)
        u32 nic_active;
        struct nes_vnic *first_nesvnic = NULL;
        struct list_head *list_pos, *list_temp;
+       unsigned long flags;
 
        nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
                        nesvnic, nesdev, netdev, netdev->name);
@@ -315,12 +335,17 @@ static int nes_netdev_stop(struct net_device *netdev)
        nic_active &= nic_active_mask;
        nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
 
-
+       spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
        if (nesvnic->of_device_registered) {
                nesdev->nesadapter->send_term_ok = 0;
                nesdev->iw_status = 0;
-               nes_port_ibevent(nesvnic);
+               if (nesvnic->linkup == 1)
+                       nes_port_ibevent(nesvnic);
        }
+       del_timer_sync(&nesvnic->event_timer);
+       nesvnic->event_timer.function = NULL;
+       spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
+
        nes_destroy_nic_qp(nesvnic);
 
        nesvnic->netdev_open = 0;
@@ -1750,7 +1775,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
                nesvnic->rdma_enabled = 0;
        }
        nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+       init_timer(&nesvnic->event_timer);
+       nesvnic->event_timer.function = NULL;
        spin_lock_init(&nesvnic->tx_lock);
+       spin_lock_init(&nesvnic->port_ibevent_lock);
        nesdev->netdev[nesdev->netdev_count] = netdev;
 
        nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
@@ -1763,8 +1791,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
              (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
               ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
                u32 u32temp;
-               u32 link_mask;
-               u32 link_val;
+               u32 link_mask = 0;
+               u32 link_val = 0;
+               u16 temp_phy_data;
+               u16 phy_data = 0;
+               unsigned long flags;
 
                u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
                                (0x200 * (nesdev->mac_index & 1)));
@@ -1786,6 +1817,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
                                link_val = 0x02020000;
                        }
                        break;
+               case NES_PHY_TYPE_SFP_D:
+                       spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+                       nes_read_10G_phy_reg(nesdev,
+                                            nesdev->nesadapter->phy_index[nesdev->mac_index],
+                                            1, 0x9003);
+                       temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                       nes_read_10G_phy_reg(nesdev,
+                                            nesdev->nesadapter->phy_index[nesdev->mac_index],
+                                            3, 0x0021);
+                       nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                       nes_read_10G_phy_reg(nesdev,
+                                            nesdev->nesadapter->phy_index[nesdev->mac_index],
+                                            3, 0x0021);
+                       phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                       spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+                       phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
+                       break;
                default:
                        link_mask = 0x0f1f0000;
                        link_val = 0x0f0f0000;
@@ -1795,8 +1843,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
                u32temp = nes_read_indexed(nesdev,
                                           NES_IDX_PHY_PCS_CONTROL_STATUS0 +
                                           (0x200 * (nesdev->mac_index & 1)));
-               if ((u32temp & link_mask) == link_val)
-                       nesvnic->linkup = 1;
+
+               if (phy_type == NES_PHY_TYPE_SFP_D) {
+                       if (phy_data & 0x0004)
+                               nesvnic->linkup = 1;
+               } else {
+                       if ((u32temp & link_mask) == link_val)
+                               nesvnic->linkup = 1;
+               }
 
                /* clear the MAC interrupt status, assumes direct logical to physical mapping */
                u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 99933e4..26d8018 100644
@@ -3936,6 +3936,30 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
        return nesibdev;
 }
 
+
+/**
+ * nes_handle_delayed_event
+ */
+static void nes_handle_delayed_event(unsigned long data)
+{
+       struct nes_vnic *nesvnic = (void *) data;
+
+       if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
+               struct ib_event event;
+
+               event.device = &nesvnic->nesibdev->ibdev;
+               if (!event.device)
+                       goto stop_timer;
+               event.event = nesvnic->delayed_event;
+               event.element.port_num = nesvnic->logical_port + 1;
+               ib_dispatch_event(&event);
+       }
+
+stop_timer:
+       nesvnic->event_timer.function = NULL;
+}
+
+
 void  nes_port_ibevent(struct nes_vnic *nesvnic)
 {
        struct nes_ib_device *nesibdev = nesvnic->nesibdev;
@@ -3944,7 +3968,18 @@ void  nes_port_ibevent(struct nes_vnic *nesvnic)
        event.device = &nesibdev->ibdev;
        event.element.port_num = nesvnic->logical_port + 1;
        event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
-       ib_dispatch_event(&event);
+
+       if (!nesvnic->event_timer.function) {
+               ib_dispatch_event(&event);
+               nesvnic->last_dispatched_event = event.event;
+               nesvnic->event_timer.function = nes_handle_delayed_event;
+               nesvnic->event_timer.data = (unsigned long) nesvnic;
+               nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
+               add_timer(&nesvnic->event_timer);
+       } else {
+               mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
+       }
+       nesvnic->delayed_event = event.event;
 }
 
 
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 127a0d5..de799f1 100644
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        wake_up(&ppd->cpspec->autoneg_wait);
-       cancel_delayed_work(&ppd->cpspec->autoneg_work);
-       flush_scheduled_work();
+       cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
        shutdown_7220_relock_poll(ppd->dd);
        val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
        toggle_7220_rclkrls(ppd->dd);
        /* 2 msec is minimum length of a poll cycle */
-       schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                             msecs_to_jiffies(2));
+       queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                          msecs_to_jiffies(2));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index abd409d..50cceb3 100644
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        wake_up(&ppd->cpspec->autoneg_wait);
-       cancel_delayed_work(&ppd->cpspec->autoneg_work);
+       cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
        if (ppd->dd->cspec->r1)
-               cancel_delayed_work(&ppd->cpspec->ipg_work);
-       flush_scheduled_work();
+               cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
        ppd->cpspec->chase_end = 0;
        if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                        if (!(pins & mask)) {
                                ++handled;
                                qd->t_insert = get_jiffies_64();
-                               schedule_work(&qd->work);
+                               queue_work(ib_wq, &qd->work);
                        }
                }
        }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
        qib_7322_mini_pcs_reset(ppd);
        /* 2 msec is minimum length of a poll cycle */
-       schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                             msecs_to_jiffies(2));
+       queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                          msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
                ib_free_send_mad(send_buf);
 retry:
        delay = 2 << ppd->cpspec->ipg_tries;
-       schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+       queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+                          msecs_to_jiffies(delay));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7896afb..ffefb78 100644
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -270,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
        struct page **pages;
        dma_addr_t *addrs;
 
-       pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+       pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
        if (!pages) {
                qib_dev_err(dd, "failed to allocate shadow page * "
                            "array, no expected sends!\n");
                goto bail;
        }
 
-       addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+       addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
        if (!addrs) {
                qib_dev_err(dd, "failed to allocate shadow dma handle "
                            "array, no expected sends!\n");
                goto bail_free;
        }
 
-       memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-       memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-
        dd->pageshadow = pages;
        dd->physshadow = addrs;
        return;
@@ -1047,24 +1043,10 @@ static int __init qlogic_ib_init(void)
        if (ret)
                goto bail;
 
-       /*
-        * We create our own workqueue mainly because we want to be
-        * able to flush it when devices are being removed.  We can't
-        * use schedule_work()/flush_scheduled_work() because both
-        * unregister_netdev() and linkwatch_event take the rtnl lock,
-        * so flush_scheduled_work() can deadlock during device
-        * removal.
-        */
-       qib_wq = create_workqueue("qib");
-       if (!qib_wq) {
-               ret = -ENOMEM;
-               goto bail_dev;
-       }
-
        qib_cq_wq = create_singlethread_workqueue("qib_cq");
        if (!qib_cq_wq) {
                ret = -ENOMEM;
-               goto bail_wq;
+               goto bail_dev;
        }
 
        /*
@@ -1094,8 +1076,6 @@ bail_unit:
        idr_destroy(&qib_unit_table);
 bail_cq_wq:
        destroy_workqueue(qib_cq_wq);
-bail_wq:
-       destroy_workqueue(qib_wq);
 bail_dev:
        qib_dev_cleanup();
 bail:
@@ -1119,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
        pci_unregister_driver(&qib_driver);
 
-       destroy_workqueue(qib_wq);
        destroy_workqueue(qib_cq_wq);
 
        qib_cpulist_count = 0;
@@ -1292,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
        if (qib_mini_init || initfail || ret) {
                qib_stop_timers(dd);
-               flush_scheduled_work();
+               flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx)
                        dd->f_quiet_serdes(dd->pport + pidx);
                if (qib_mini_init)
@@ -1341,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
        qib_stop_timers(dd);
 
-       /* wait until all of our (qsfp) schedule_work() calls complete */
-       flush_scheduled_work();
+       /* wait until all of our (qsfp) queue_work() calls complete */
+       flush_workqueue(ib_wq);
 
        ret = qibfs_remove(dd);
        if (ret)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604..3374a52 100644
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                goto bail;
        /* We see a module, but it may be unwise to look yet. Just schedule */
        qd->t_insert = get_jiffies_64();
-       schedule_work(&qd->work);
+       queue_work(ib_wq, &qd->work);
 bail:
        return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
        /*
-        * There is nothing to do here for now.  our
-        * work is scheduled with schedule_work(), and
-        * flush_scheduled_work() from remove_one will
-        * block until all work ssetup with schedule_work()
+        * There is nothing to do here for now.  our work is scheduled
+        * with queue_work(), and flush_workqueue() from remove_one
+        * will block until all work setup with queue_work()
         * completes.
         */
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 63b22a9..95e5b47 100644
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
        if (qib_send_ok(qp))
-               queue_work(qib_wq, &qp->s_work);
+               queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c1c49f2..93d5580 100644
@@ -352,15 +352,13 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
        int ret;
        int i;
 
-       rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+       rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
        if (!rx->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                return -ENOMEM;
        }
 
-       memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
-
        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
@@ -1097,13 +1095,12 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        int ret;
 
-       p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+       p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }
-       memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
        p->qp = ipoib_cm_create_tx_qp(p->dev, p);
        if (IS_ERR(p->qp)) {
@@ -1521,7 +1518,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
                return;
        }
 
-       priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+       priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
@@ -1530,7 +1527,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
                return;
        }
 
-       memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a07a72..aca3b44 100644
@@ -916,13 +916,12 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
                goto out;
        }
 
-       priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+       priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, ipoib_sendq_size);
                goto out_rx_ring_cleanup;
        }
-       memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
        /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b62105..83664ed 100644
@@ -638,7 +638,7 @@ err:
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work);
-               schedule_work(&target->work);
+               queue_work(ib_wq, &target->work);
        }
        spin_unlock_irq(&target->lock);
 
@@ -1132,15 +1132,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 
        spin_lock_irqsave(&target->lock, flags);
        iu = __srp_get_tx_iu(target, SRP_IU_CMD);
-       if (iu) {
-               req = list_first_entry(&target->free_reqs, struct srp_request,
-                                     list);
-               list_del(&req->list);
-       }
-       spin_unlock_irqrestore(&target->lock, flags);
-
        if (!iu)
-               goto err;
+               goto err_unlock;
+
+       req = list_first_entry(&target->free_reqs, struct srp_request, list);
+       list_del(&req->list);
+       spin_unlock_irqrestore(&target->lock, flags);
 
        dev = target->srp_host->srp_dev->dev;
        ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
@@ -1185,6 +1182,8 @@ err_iu:
 
        spin_lock_irqsave(&target->lock, flags);
        list_add(&req->list, &target->free_reqs);
+
+err_unlock:
        spin_unlock_irqrestore(&target->lock, flags);
 
 err:
@@ -2199,7 +2198,7 @@ static void srp_remove_one(struct ib_device *device)
                 * started before we marked our target ports as
                 * removed, and any target port removal tasks.
                 */
-               flush_scheduled_work();
+               flush_workqueue(ib_wq);
 
                list_for_each_entry_safe(target, tmp_target,
                                         &host->target_list, list) {
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42..32f9471 100644
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       unsigned long addr;
+       phys_addr_t addr;
 
        INIT_LIST_HEAD(&priv->catas_err.list);
        init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 
        priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
        if (!priv->catas_err.map) {
-               mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
-                         addr);
+               mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+                         (unsigned long long) addr);
                return;
        }
 
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40..1ff6ca6 100644
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
        if (mlx4_uar_alloc(dev, &mdev->priv_uar))
                goto err_pd;
 
-       mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+       mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+                               PAGE_SIZE);
        if (!mdev->uar_map)
                goto err_uar;
        spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d..4ffdc18 100644
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_uar_table_free;
        }
 
-       priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+       priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!priv->kar) {
                mlx4_err(dev, "Couldn't map kernel access region, "
                         "aborting.\n");
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7..79cf42d 100644
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
  * entry in hash chain and *mgm holds end of hash chain.
  */
 static int find_mgm(struct mlx4_dev *dev,
-                   u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+                   u8 *gid, enum mlx4_protocol protocol,
+                   struct mlx4_cmd_mailbox *mgm_mailbox,
                    u16 *hash, int *prev, int *index)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev,
                        return err;
                }
 
-               if (!memcmp(mgm->gid, gid, 16))
+               if (!memcmp(mgm->gid, gid, 16) &&
+                   be32_to_cpu(mgm->members_count) >> 30 == protocol)
                        return err;
 
                *prev = *index;
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev,
 }
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         int block_mcast_loopback)
+                         int block_mcast_loopback, enum mlx4_protocol protocol)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+       err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
        if (err)
                goto out;
 
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                memcpy(mgm->gid, gid, 16);
        }
 
-       members_count = be32_to_cpu(mgm->members_count);
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        if (members_count == MLX4_QP_PER_MGM) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        else
                mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
 
-       mgm->members_count       = cpu_to_be32(members_count);
+       mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
 
        err = mlx4_WRITE_MCG(dev, index, mailbox);
        if (err)
@@ -242,7 +244,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         enum mlx4_protocol protocol)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+       err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
        if (err)
                goto out;
 
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
                goto out;
        }
 
-       members_count = be32_to_cpu(mgm->members_count);
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (loc = -1, i = 0; i < members_count; ++i)
                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
                        loc = i;
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
        }
 
 
-       mgm->members_count = cpu_to_be32(--members_count);
+       mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
        mgm->qp[loc]       = mgm->qp[i - 1];
        mgm->qp[i - 1]     = 0;
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a7b15bc..0492146 100644
@@ -144,6 +144,11 @@ enum {
        MLX4_STAT_RATE_OFFSET   = 5
 };
 
+enum mlx4_protocol {
+       MLX4_PROTOCOL_IB,
+       MLX4_PROTOCOL_EN,
+};
+
 enum {
        MLX4_MTT_FLAG_PRESENT           = 1
 };
@@ -500,8 +505,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         int block_mcast_loopback);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+                         int block_mcast_loopback, enum mlx4_protocol protocol);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         enum mlx4_protocol protocol);
 
 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index f407cd4..e1eebf7 100644
@@ -34,6 +34,7 @@
 #define MLX4_DRIVER_H
 
 #include <linux/device.h>
+#include <linux/mlx4/device.h>
 
 struct mlx4_dev;
 
@@ -44,11 +45,6 @@ enum mlx4_dev_event {
        MLX4_DEV_EVENT_PORT_REINIT,
 };
 
-enum mlx4_protocol {
-       MLX4_PROTOCOL_IB,
-       MLX4_PROTOCOL_EN,
-};
-
 struct mlx4_interface {
        void *                  (*add)   (struct mlx4_dev *dev);
        void                    (*remove)(struct mlx4_dev *dev, void *context);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e04c488..55cd0a0 100644
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
        u8      raw[16];
        struct {