Merge branches 'bart-srp', 'generic-errors', 'ira-cleanups' and 'mwang-v8' into k...
author		Doug Ledford <dledford@redhat.com>
		Wed, 20 May 2015 20:12:40 +0000 (16:12 -0400)
committer	Doug Ledford <dledford@redhat.com>
		Wed, 20 May 2015 20:12:40 +0000 (16:12 -0400)
40 files changed:
drivers/infiniband/core/agent.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/mad_priv.h
drivers/infiniband/core/multicast.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/smi.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/ehca/ehca_iverbs.h
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srp/ib_srp.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/scsi_transport_srp.c
include/rdma/ib_mad.h
include/rdma/ib_verbs.h
include/scsi/srp.h
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_transport.c

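The common thread in the diffs below is the 'mwang-v8' series: open-coded rdma_node_get_transport()/rdma_port_get_link_layer() checks give way to shared per-port helpers from include/rdma/ib_verbs.h, whose hunks fall beyond the end of this excerpt. Only rdma_start_port()/rdma_end_port() can be read off directly, since they replace identical static helpers deleted from cache.c and device.c; the capability predicate below is a hedged sketch inferred from the call sites, not the in-tree definition.

/*
 * Sketch only -- inferred from the converted call sites, not copied from
 * the ib_verbs.h hunks.  rdma_start_port()/rdma_end_port() mirror the
 * static start_port()/end_port() helpers this merge deletes: switches
 * number their single port 0, all other devices 1..phys_port_cnt.
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline u8 rdma_end_port(const struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

/*
 * The rdma_cap_*() predicates (rdma_cap_ib_mad/smi/cm/sa/mcast, ...)
 * answer "does this port support management service X" per port rather
 * than per device.  Exact definition assumed; e.g. MAD services run on
 * IB and RoCE ports but not on iWARP ones:
 */
static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
	       rdma_protocol_roce(device, port_num);
}
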
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index f6d2961..a6fc4d6 100644
@@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
                goto error1;
        }
 
-       if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+       if (rdma_cap_ib_smi(device, port_num)) {
                /* Obtain send only MAD agent for SMI QP */
                port_priv->agent[0] = ib_register_mad_agent(device, port_num,
                                                            IB_QPT_SMI, NULL, 0,
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 80f6cf2..08921b3 100644
@@ -58,17 +58,6 @@ struct ib_update_work {
        u8                 port_num;
 };
 
-static inline int start_port(struct ib_device *device)
-{
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-static inline int end_port(struct ib_device *device)
-{
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-               0 : device->phys_port_cnt;
-}
-
 int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
        unsigned long flags;
        int ret = 0;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        read_lock_irqsave(&device->cache.lock, flags);
 
-       cache = device->cache.gid_cache[port_num - start_port(device)];
+       cache = device->cache.gid_cache[port_num - rdma_start_port(device)];
 
        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,
 
        read_lock_irqsave(&device->cache.lock, flags);
 
-       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+       for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                cache = device->cache.gid_cache[p];
                for (i = 0; i < cache->table_len; ++i) {
                        if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
-                               *port_num = p + start_port(device);
+                               *port_num = p + rdma_start_port(device);
                                if (index)
                                        *index = i;
                                ret = 0;
@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
        unsigned long flags;
        int ret = 0;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        read_lock_irqsave(&device->cache.lock, flags);
 
-       cache = device->cache.pkey_cache[port_num - start_port(device)];
+       cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
        int ret = -ENOENT;
        int partial_ix = -1;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        read_lock_irqsave(&device->cache.lock, flags);
 
-       cache = device->cache.pkey_cache[port_num - start_port(device)];
+       cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
        *index = -1;
 
@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
        int i;
        int ret = -ENOENT;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        read_lock_irqsave(&device->cache.lock, flags);
 
-       cache = device->cache.pkey_cache[port_num - start_port(device)];
+       cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
 
        *index = -1;
 
@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
        unsigned long flags;
        int ret = 0;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        read_lock_irqsave(&device->cache.lock, flags);
-       *lmc = device->cache.lmc_cache[port_num - start_port(device)];
+       *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);
 
        return ret;
@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,
 
        write_lock_irq(&device->cache.lock);
 
-       old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
-       old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
+       old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+       old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];
 
-       device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
-       device->cache.gid_cache [port - start_port(device)] = gid_cache;
+       device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+       device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;
 
-       device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
+       device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
 
        write_unlock_irq(&device->cache.lock);
 
@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)
 
        device->cache.pkey_cache =
                kmalloc(sizeof *device->cache.pkey_cache *
-                       (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+                       (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.gid_cache =
                kmalloc(sizeof *device->cache.gid_cache *
-                       (end_port(device) - start_port(device) + 1), GFP_KERNEL);
+                       (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
 
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-                                         (end_port(device) -
-                                          start_port(device) + 1),
+                                         (rdma_end_port(device) -
+                                          rdma_start_port(device) + 1),
                                          GFP_KERNEL);
 
        if (!device->cache.pkey_cache || !device->cache.gid_cache ||
@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
                goto err;
        }
 
-       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+       for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                device->cache.pkey_cache[p] = NULL;
                device->cache.gid_cache [p] = NULL;
-               ib_cache_update(device, p + start_port(device));
+               ib_cache_update(device, p + rdma_start_port(device));
        }
 
        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
        return;
 
 err_cache:
-       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+       for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }
@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
 
-       for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+       for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0c14191..14423c2 100644
@@ -3759,11 +3759,9 @@ static void cm_add_one(struct ib_device *ib_device)
        };
        unsigned long flags;
        int ret;
+       int count = 0;
        u8 i;
 
-       if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
-               return;
-
        cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
                         ib_device->phys_port_cnt, GFP_KERNEL);
        if (!cm_dev)
@@ -3782,6 +3780,9 @@ static void cm_add_one(struct ib_device *ib_device)
 
        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+               if (!rdma_cap_ib_cm(ib_device, i))
+                       continue;
+
                port = kzalloc(sizeof *port, GFP_KERNEL);
                if (!port)
                        goto error1;
@@ -3808,7 +3809,13 @@ static void cm_add_one(struct ib_device *ib_device)
                ret = ib_modify_port(ib_device, i, 0, &port_modify);
                if (ret)
                        goto error3;
+
+               count++;
        }
+
+       if (!count)
+               goto free;
+
        ib_set_client_data(ib_device, &cm_client, cm_dev);
 
        write_lock_irqsave(&cm.device_lock, flags);
@@ -3824,11 +3831,15 @@ error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
+               if (!rdma_cap_ib_cm(ib_device, i))
+                       continue;
+
                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
        }
+free:
        device_unregister(cm_dev->device);
        kfree(cm_dev);
 }
@@ -3852,6 +3863,9 @@ static void cm_remove_one(struct ib_device *ib_device)
        write_unlock_irqrestore(&cm.device_lock, flags);
 
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
+               if (!rdma_cap_ib_cm(ib_device, i))
+                       continue;
+
                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
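
cm_add_one() above now counts the ports it actually set up and bails out entirely when a device exposes none with the IB CM capability, and the error path applies the same rdma_cap_ib_cm() filter so only initialized ports are torn down. The same shape recurs in sa_query.c and user_mad.c below. A condensed sketch, with example_init_port()/example_cleanup_port() as hypothetical stand-ins for the real per-port setup and teardown:

static void example_add_one(struct ib_device *device)
{
	int count = 0;
	u8 i;

	for (i = 1; i <= device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(device, i))
			continue;		/* mixed-protocol device: skip port */
		if (example_init_port(device, i))
			goto error;
		count++;
	}
	if (!count)				/* no usable port: don't register */
		goto free;
	/* ib_set_client_data(...) etc. */
	return;
error:
	while (--i) {				/* unwind in reverse, same filter */
		if (!rdma_cap_ib_cm(device, i))
			continue;
		example_cleanup_port(device, i);
	}
free:
	/* free device-wide state */
	return;
}
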
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b2114ef..c34d650 100644
@@ -377,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
        return ret;
 }
 
+static inline int cma_validate_port(struct ib_device *device, u8 port,
+                                     union ib_gid *gid, int dev_type)
+{
+       u8 found_port;
+       int ret = -ENODEV;
+
+       if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
+               return ret;
+
+       if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+               return ret;
+
+       ret = ib_find_cached_gid(device, gid, &found_port, NULL);
+       if (port != found_port)
+               return -ENODEV;
+
+       return ret;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv,
                           struct rdma_id_private *listen_id_priv)
 {
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
-       union ib_gid gid, iboe_gid;
+       union ib_gid gid, iboe_gid, *gidp;
        int ret = -ENODEV;
-       u8 port, found_port;
-       enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
-               IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+       u8 port;
 
-       if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+       if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
            id_priv->id.ps == RDMA_PS_IPOIB)
                return -EINVAL;
 
@@ -398,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 
        memcpy(&gid, dev_addr->src_dev_addr +
               rdma_addr_gid_offset(dev_addr), sizeof gid);
-       if (listen_id_priv &&
-           rdma_port_get_link_layer(listen_id_priv->id.device,
-                                    listen_id_priv->id.port_num) == dev_ll) {
+
+       if (listen_id_priv) {
                cma_dev = listen_id_priv->cma_dev;
                port = listen_id_priv->id.port_num;
-               if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-                   rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-                       ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
-                                                &found_port, NULL);
-               else
-                       ret = ib_find_cached_gid(cma_dev->device, &gid,
-                                                &found_port, NULL);
+               gidp = rdma_protocol_roce(cma_dev->device, port) ?
+                      &iboe_gid : &gid;
 
-               if (!ret && (port  == found_port)) {
-                       id_priv->id.port_num = found_port;
+               ret = cma_validate_port(cma_dev->device, port, gidp,
+                                       dev_addr->dev_type);
+               if (!ret) {
+                       id_priv->id.port_num = port;
                        goto out;
                }
        }
+
        list_for_each_entry(cma_dev, &dev_list, list) {
                for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
                        if (listen_id_priv &&
                            listen_id_priv->cma_dev == cma_dev &&
                            listen_id_priv->id.port_num == port)
                                continue;
-                       if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
-                               if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
-                                   rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-                                       ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
-                               else
-                                       ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
-
-                               if (!ret && (port == found_port)) {
-                                       id_priv->id.port_num = found_port;
-                                       goto out;
-                               }
+
+                       gidp = rdma_protocol_roce(cma_dev->device, port) ?
+                              &iboe_gid : &gid;
+
+                       ret = cma_validate_port(cma_dev->device, port, gidp,
+                                               dev_addr->dev_type);
+                       if (!ret) {
+                               id_priv->id.port_num = port;
+                               goto out;
                        }
                }
        }
@@ -463,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        pkey = ntohs(addr->sib_pkey);
 
        list_for_each_entry(cur_dev, &dev_list, list) {
-               if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
-                       continue;
-
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
+                       if (!rdma_cap_af_ib(cur_dev->device, p))
+                               continue;
+
                        if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
                                continue;
 
@@ -661,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
        if (ret)
                goto out;
 
-       if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
-           == RDMA_TRANSPORT_IB &&
-           rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
-           == IB_LINK_LAYER_ETHERNET) {
+       BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+
+       if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
                ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
 
                if (ret)
@@ -728,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
        int ret;
        u16 pkey;
 
-       if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
-           IB_LINK_LAYER_INFINIBAND)
-               pkey = ib_addr_get_pkey(dev_addr);
-       else
+       if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
                pkey = 0xffff;
+       else
+               pkey = ib_addr_get_pkey(dev_addr);
 
        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  pkey, &qp_attr->pkey_index);
@@ -763,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
        int ret = 0;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
@@ -773,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
-               break;
-       default:
+       } else
                ret = -ENOSYS;
-               break;
-       }
 
        return ret;
 }
@@ -963,13 +968,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-       switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
-       case IB_LINK_LAYER_INFINIBAND:
+       if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
-               break;
-       default:
-               break;
        }
 }
 
@@ -1041,17 +1042,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
-               switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
-               case IB_LINK_LAYER_INFINIBAND:
+               if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+                                     id_priv->id.port_num)) {
                        ib_sa_free_multicast(mc->multicast.ib);
                        kfree(mc);
-                       break;
-               case IB_LINK_LAYER_ETHERNET:
+               } else
                        kref_put(&mc->mcref, release_mc);
-                       break;
-               default:
-                       break;
-               }
        }
 }
 
@@ -1072,17 +1068,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
        mutex_unlock(&id_priv->handler_mutex);
 
        if (id_priv->cma_dev) {
-               switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-               case RDMA_TRANSPORT_IB:
+               if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.ib)
                                ib_destroy_cm_id(id_priv->cm_id.ib);
-                       break;
-               case RDMA_TRANSPORT_IWARP:
+               } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
                        if (id_priv->cm_id.iw)
                                iw_destroy_cm_id(id_priv->cm_id.iw);
-                       break;
-               default:
-                       break;
                }
                cma_leave_mc_groups(id_priv);
                cma_release_dev(id_priv);
@@ -1660,8 +1651,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        struct rdma_cm_id *id;
        int ret;
 
-       if (cma_family(id_priv) == AF_IB &&
-           rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
+       if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
 
        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -2002,26 +1992,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
                return -EINVAL;
 
        atomic_inc(&id_priv->refcount);
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
-               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-               case IB_LINK_LAYER_INFINIBAND:
-                       ret = cma_resolve_ib_route(id_priv, timeout_ms);
-                       break;
-               case IB_LINK_LAYER_ETHERNET:
-                       ret = cma_resolve_iboe_route(id_priv);
-                       break;
-               default:
-                       ret = -ENOSYS;
-               }
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       if (rdma_cap_ib_sa(id->device, id->port_num))
+               ret = cma_resolve_ib_route(id_priv, timeout_ms);
+       else if (rdma_protocol_roce(id->device, id->port_num))
+               ret = cma_resolve_iboe_route(id_priv);
+       else if (rdma_protocol_iwarp(id->device, id->port_num))
                ret = cma_resolve_iw_route(id_priv, timeout_ms);
-               break;
-       default:
+       else
                ret = -ENOSYS;
-               break;
-       }
+
        if (ret)
                goto err;
 
@@ -2063,7 +2042,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
        mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                if (cma_family(id_priv) == AF_IB &&
-                   rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
+                   !rdma_cap_ib_cm(cur_dev->device, 1))
                        continue;
 
                if (!cma_dev)
@@ -2095,7 +2074,7 @@ port_found:
                goto out;
 
        id_priv->id.route.addr.dev_addr.dev_type =
-               (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
+               (rdma_protocol_ib(cma_dev->device, p)) ?
                ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
        rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2572,18 +2551,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 
        id_priv->backlog = backlog;
        if (id->device) {
-               switch (rdma_node_get_transport(id->device->node_type)) {
-               case RDMA_TRANSPORT_IB:
+               if (rdma_cap_ib_cm(id->device, 1)) {
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
-                       break;
-               case RDMA_TRANSPORT_IWARP:
+               } else if (rdma_cap_iw_cm(id->device, 1)) {
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
-                       break;
-               default:
+               } else {
                        ret = -ENOSYS;
                        goto err;
                }
@@ -2919,20 +2895,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                id_priv->srq = conn_param->srq;
        }
 
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD)
                        ret = cma_resolve_ib_udp(id_priv, conn_param);
                else
                        ret = cma_connect_ib(id_priv, conn_param);
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       } else if (rdma_cap_iw_cm(id->device, id->port_num))
                ret = cma_connect_iw(id_priv, conn_param);
-               break;
-       default:
+       else
                ret = -ENOSYS;
-               break;
-       }
        if (ret)
                goto err;
 
@@ -3035,8 +3006,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                id_priv->srq = conn_param->srq;
        }
 
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD) {
                        if (conn_param)
                                ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3052,14 +3022,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
                        else
                                ret = cma_rep_recv(id_priv);
                }
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       } else if (rdma_cap_iw_cm(id->device, id->port_num))
                ret = cma_accept_iw(id_priv, conn_param);
-               break;
-       default:
+       else
                ret = -ENOSYS;
-               break;
-       }
 
        if (ret)
                goto reject;
@@ -3103,8 +3069,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
        if (!id_priv->cm_id.ib)
                return -EINVAL;
 
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       if (rdma_cap_ib_cm(id->device, id->port_num)) {
                if (id->qp_type == IB_QPT_UD)
                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
                                                private_data, private_data_len);
@@ -3112,15 +3077,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
                        ret = ib_send_cm_rej(id_priv->cm_id.ib,
                                             IB_CM_REJ_CONSUMER_DEFINED, NULL,
                                             0, private_data, private_data_len);
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                ret = iw_cm_reject(id_priv->cm_id.iw,
                                   private_data, private_data_len);
-               break;
-       default:
+       } else
                ret = -ENOSYS;
-               break;
-       }
+
        return ret;
 }
 EXPORT_SYMBOL(rdma_reject);
@@ -3134,22 +3096,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
        if (!id_priv->cm_id.ib)
                return -EINVAL;
 
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
+       if (rdma_cap_ib_cm(id->device, id->port_num)) {
                ret = cma_modify_qp_err(id_priv);
                if (ret)
                        goto out;
                /* Initiate or respond to a disconnect. */
                if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
                        ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
-               break;
-       case RDMA_TRANSPORT_IWARP:
+       } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
                ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
-               break;
-       default:
+       } else
                ret = -EINVAL;
-               break;
-       }
+
 out:
        return ret;
 }
@@ -3395,24 +3353,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
        list_add(&mc->list, &id_priv->mc_list);
        spin_unlock(&id_priv->lock);
 
-       switch (rdma_node_get_transport(id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
-               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-               case IB_LINK_LAYER_INFINIBAND:
-                       ret = cma_join_ib_multicast(id_priv, mc);
-                       break;
-               case IB_LINK_LAYER_ETHERNET:
-                       kref_init(&mc->mcref);
-                       ret = cma_iboe_join_multicast(id_priv, mc);
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-               break;
-       default:
+       if (rdma_protocol_roce(id->device, id->port_num)) {
+               kref_init(&mc->mcref);
+               ret = cma_iboe_join_multicast(id_priv, mc);
+       } else if (rdma_cap_ib_mcast(id->device, id->port_num))
+               ret = cma_join_ib_multicast(id_priv, mc);
+       else
                ret = -ENOSYS;
-               break;
-       }
 
        if (ret) {
                spin_lock_irq(&id_priv->lock);
@@ -3440,19 +3387,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
                                ib_detach_mcast(id->qp,
                                                &mc->multicast.ib->rec.mgid,
                                                be16_to_cpu(mc->multicast.ib->rec.mlid));
-                       if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
-                               switch (rdma_port_get_link_layer(id->device, id->port_num)) {
-                               case IB_LINK_LAYER_INFINIBAND:
-                                       ib_sa_free_multicast(mc->multicast.ib);
-                                       kfree(mc);
-                                       break;
-                               case IB_LINK_LAYER_ETHERNET:
-                                       kref_put(&mc->mcref, release_mc);
-                                       break;
-                               default:
-                                       break;
-                               }
-                       }
+
+                       BUG_ON(id_priv->cma_dev->device != id->device);
+
+                       if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+                               ib_sa_free_multicast(mc->multicast.ib);
+                               kfree(mc);
+                       } else if (rdma_protocol_roce(id->device, id->port_num))
+                               kref_put(&mc->mcref, release_mc);
+
                        return;
                }
        }
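
A note on cma_validate_port() above: its two early returns jointly require that the address type recorded in dev_addr agree with the port's protocol, so ARPHRD_INFINIBAND addresses resolve only to IB ports and anything else only to non-IB (RoCE/iWARP) ports. The pair condenses to a single equivalence (helper name hypothetical):

static bool addr_matches_port(int dev_type, struct ib_device *device, u8 port)
{
	/* equivalent to the two -ENODEV guards in cma_validate_port() */
	return (dev_type == ARPHRD_INFINIBAND) ==
	       !!rdma_protocol_ib(device, port);
}
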
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 18c1ece..8d07c12 100644
@@ -92,7 +92,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
-               IB_MANDATORY_FUNC(dereg_mr)
+               IB_MANDATORY_FUNC(dereg_mr),
+               IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;
 
@@ -151,18 +152,6 @@ static int alloc_name(char *name)
        return 0;
 }
 
-static int start_port(struct ib_device *device)
-{
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
-}
-
-
-static int end_port(struct ib_device *device)
-{
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-               0 : device->phys_port_cnt;
-}
-
 /**
  * ib_alloc_device - allocate an IB device struct
  * @size:size of structure to allocate
@@ -222,42 +211,38 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
        return 0;
 }
 
-static int read_port_table_lengths(struct ib_device *device)
+static int read_port_immutable(struct ib_device *device)
 {
-       struct ib_port_attr *tprops = NULL;
-       int num_ports, ret = -ENOMEM;
-       u8 port_index;
-
-       tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
-       if (!tprops)
-               goto out;
-
-       num_ports = end_port(device) - start_port(device) + 1;
-
-       device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
-                                      GFP_KERNEL);
-       device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
-                                     GFP_KERNEL);
-       if (!device->pkey_tbl_len || !device->gid_tbl_len)
+       int ret = -ENOMEM;
+       u8 start_port = rdma_start_port(device);
+       u8 end_port = rdma_end_port(device);
+       u8 port;
+
+       /**
+        * device->port_immutable is indexed directly by the port number to make
+        * access to this data as efficient as possible.
+        *
+        * Therefore port_immutable is declared as a 1 based array with
+        * potential empty slots at the beginning.
+        */
+       device->port_immutable = kzalloc(sizeof(*device->port_immutable)
+                                        * (end_port + 1),
+                                        GFP_KERNEL);
+       if (!device->port_immutable)
                goto err;
 
-       for (port_index = 0; port_index < num_ports; ++port_index) {
-               ret = ib_query_port(device, port_index + start_port(device),
-                                       tprops);
+       for (port = start_port; port <= end_port; ++port) {
+               ret = device->get_port_immutable(device, port,
+                                                &device->port_immutable[port]);
                if (ret)
                        goto err;
-               device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
-               device->gid_tbl_len[port_index]  = tprops->gid_tbl_len;
        }
 
        ret = 0;
        goto out;
-
 err:
-       kfree(device->gid_tbl_len);
-       kfree(device->pkey_tbl_len);
+       kfree(device->port_immutable);
 out:
-       kfree(tprops);
        return ret;
 }
 
@@ -294,9 +279,9 @@ int ib_register_device(struct ib_device *device,
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
 
-       ret = read_port_table_lengths(device);
+       ret = read_port_immutable(device);
        if (ret) {
-               printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
+               printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
                       device->name);
                goto out;
        }
@@ -305,8 +290,7 @@ int ib_register_device(struct ib_device *device,
        if (ret) {
                printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                       device->name);
-               kfree(device->gid_tbl_len);
-               kfree(device->pkey_tbl_len);
+               kfree(device->port_immutable);
                goto out;
        }
 
@@ -348,9 +332,6 @@ void ib_unregister_device(struct ib_device *device)
 
        list_del(&device->core_list);
 
-       kfree(device->gid_tbl_len);
-       kfree(device->pkey_tbl_len);
-
        mutex_unlock(&device_mutex);
 
        ib_device_unregister_sysfs(device);
@@ -575,7 +556,7 @@ int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
 {
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        return device->query_port(device, port_num, port_attr);
@@ -653,7 +634,7 @@ int ib_modify_port(struct ib_device *device,
        if (!device->modify_port)
                return -ENOSYS;
 
-       if (port_num < start_port(device) || port_num > end_port(device))
+       if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
        return device->modify_port(device, port_num, port_modify_mask,
@@ -676,8 +657,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
        union ib_gid tmp_gid;
        int ret, port, i;
 
-       for (port = start_port(device); port <= end_port(device); ++port) {
-               for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
+       for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
+               for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
@@ -709,7 +690,7 @@ int ib_find_pkey(struct ib_device *device,
        u16 tmp_pkey;
        int partial_ix = -1;
 
-       for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
+       for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
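
read_port_immutable() above replaces the separate pkey_tbl_len[]/gid_tbl_len[] arrays with one kzalloc'd array of end_port + 1 entries so it can be indexed by the 1-based port number directly (slot 0 stays empty on non-switch devices, per the block comment). Only two members are visible in this excerpt; a sketch of the implied struct, with anything beyond them an assumption:

struct ib_port_immutable {
	int pkey_tbl_len;	/* was device->pkey_tbl_len[port - start_port] */
	int gid_tbl_len;	/* was device->gid_tbl_len[port - start_port] */
	/* further members (e.g. capability flags) assumed, not shown here */
};

/*
 * Filled once at registration through the driver hook that
 * ib_device_check_mandatory() now requires; the signature follows from
 * the call in read_port_immutable():
 *
 *	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
 *				  struct ib_port_immutable *immutable);
 */
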
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 74c30f4..600af26 100644
@@ -179,12 +179,12 @@ static int is_vendor_method_in_use(
        return 0;
 }
 
-int ib_response_mad(struct ib_mad *mad)
+int ib_response_mad(const struct ib_mad_hdr *hdr)
 {
-       return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
-               (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-               ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
-                (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+       return ((hdr->method & IB_MGMT_METHOD_RESP) ||
+               (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+               ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
+                (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
 }
 EXPORT_SYMBOL(ib_response_mad);
 
@@ -791,7 +791,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
        switch (ret)
        {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-               if (ib_response_mad(&mad_priv->mad.mad) &&
+               if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
@@ -910,7 +910,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
        return 0;
 }
 
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
 {
        return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
 }
@@ -1628,7 +1628,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
        unsigned long flags;
 
        spin_lock_irqsave(&port_priv->reg_lock, flags);
-       if (ib_response_mad(mad)) {
+       if (ib_response_mad(&mad->mad_hdr)) {
                u32 hi_tid;
                struct ib_mad_agent_private *entry;
 
@@ -1708,20 +1708,20 @@ out:
        return mad_agent;
 }
 
-static int validate_mad(struct ib_mad *mad, u32 qp_num)
+static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
 {
        int valid = 0;
 
        /* Make sure MAD base version is understood */
-       if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
+       if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
                pr_err("MAD received with unsupported base version %d\n",
-                       mad->mad_hdr.base_version);
+                       mad_hdr->base_version);
                goto out;
        }
 
        /* Filter SMI packets sent to other than QP0 */
-       if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
-           (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
+       if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
+           (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                if (qp_num == 0)
                        valid = 1;
        } else {
@@ -1734,8 +1734,8 @@ out:
        return valid;
 }
 
-static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
-                      struct ib_mad_hdr *mad_hdr)
+static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
+                           const struct ib_mad_hdr *mad_hdr)
 {
        struct ib_rmpp_mad *rmpp_mad;
 
@@ -1747,16 +1747,16 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
                (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
 }
 
-static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
-                                    struct ib_mad_recv_wc *rwc)
+static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
+                                    const struct ib_mad_recv_wc *rwc)
 {
-       return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
+       return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
                rwc->recv_buf.mad->mad_hdr.mgmt_class;
 }
 
-static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
-                                  struct ib_mad_send_wr_private *wr,
-                                  struct ib_mad_recv_wc *rwc )
+static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+                                  const struct ib_mad_send_wr_private *wr,
+                                  const struct ib_mad_recv_wc *rwc )
 {
        struct ib_ah_attr attr;
        u8 send_resp, rcv_resp;
@@ -1765,8 +1765,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
        u8 port_num = mad_agent_priv->agent.port_num;
        u8 lmc;
 
-       send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
-       rcv_resp = ib_response_mad(rwc->recv_buf.mad);
+       send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
+       rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
 
        if (send_resp == rcv_resp)
                /* both requests, or both responses. GIDs different */
@@ -1811,8 +1811,8 @@ static inline int is_direct(u8 class)
 }
 
 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
-                struct ib_mad_recv_wc *wc)
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+                const struct ib_mad_recv_wc *wc)
 {
        struct ib_mad_send_wr_private *wr;
        struct ib_mad *mad;
@@ -1836,7 +1836,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
         * been notified that the send has completed
         */
        list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
-               if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
+               if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
                    wr->tid == mad->mad_hdr.tid &&
                    wr->timeout &&
                    rcv_has_same_class(wr, wc) &&
@@ -1879,7 +1879,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
        }
 
        /* Complete corresponding request */
-       if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
+       if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
                if (!mad_send_wr) {
@@ -1979,7 +1979,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
 
        /* Validate MAD */
-       if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
+       if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
                goto out;
 
        response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
@@ -2411,7 +2411,8 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
 
        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
-               if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
+               if (is_rmpp_data_mad(mad_agent_priv,
+                                    mad_send_wr->send_buf.mad) &&
                    &mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }
@@ -2938,7 +2939,7 @@ static int ib_mad_port_open(struct ib_device *device,
        init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
        cq_size = mad_sendq_size + mad_recvq_size;
-       has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+       has_smi = rdma_cap_ib_smi(device, port_num);
        if (has_smi)
                cq_size *= 2;
 
@@ -3057,9 +3058,6 @@ static void ib_mad_init_device(struct ib_device *device)
 {
        int start, end, i;
 
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
-
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
@@ -3069,6 +3067,9 @@ static void ib_mad_init_device(struct ib_device *device)
        }
 
        for (i = start; i <= end; i++) {
+               if (!rdma_cap_ib_mad(device, i))
+                       continue;
+
                if (ib_mad_port_open(device, i)) {
                        dev_err(&device->dev, "Couldn't open port %d\n", i);
                        goto error;
@@ -3086,40 +3087,39 @@ error_agent:
                dev_err(&device->dev, "Couldn't close port %d\n", i);
 
 error:
-       i--;
+       while (--i >= start) {
+               if (!rdma_cap_ib_mad(device, i))
+                       continue;
 
-       while (i >= start) {
                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
-               i--;
        }
 }
 
 static void ib_mad_remove_device(struct ib_device *device)
 {
-       int i, num_ports, cur_port;
-
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
+       int start, end, i;
 
        if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               num_ports = 1;
-               cur_port = 0;
+               start = 0;
+               end   = 0;
        } else {
-               num_ports = device->phys_port_cnt;
-               cur_port = 1;
+               start = 1;
+               end   = device->phys_port_cnt;
        }
-       for (i = 0; i < num_ports; i++, cur_port++) {
-               if (ib_agent_port_close(device, cur_port))
+
+       for (i = start; i <= end; i++) {
+               if (!rdma_cap_ib_mad(device, i))
+                       continue;
+
+               if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
-                               "Couldn't close port %d for agents\n",
-                               cur_port);
-               if (ib_mad_port_close(device, cur_port))
-                       dev_err(&device->dev, "Couldn't close port %d\n",
-                               cur_port);
+                               "Couldn't close port %d for agents\n", i);
+               if (ib_mad_port_close(device, i))
+                       dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
 }
 
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d1a0b0e..7b19cba 100644
@@ -213,8 +213,8 @@ struct ib_mad_port_private {
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
 
 struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
-                struct ib_mad_recv_wc *mad_recv_wc);
+ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
+                const struct ib_mad_recv_wc *mad_recv_wc);
 
 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                             struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index fa17b55..605f20a 100644
@@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
        int index;
 
        dev = container_of(handler, struct mcast_device, event_handler);
-       if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
-           IB_LINK_LAYER_INFINIBAND)
+       if (WARN_ON(!rdma_cap_ib_mcast(dev->device, event->element.port_num)))
                return;
 
        index = event->element.port_num - dev->start_port;
@@ -808,9 +807,6 @@ static void mcast_add_one(struct ib_device *device)
        int i;
        int count = 0;
 
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
-
        dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
                      GFP_KERNEL);
        if (!dev)
@@ -824,8 +820,7 @@ static void mcast_add_one(struct ib_device *device)
        }
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-               if (rdma_port_get_link_layer(device, dev->start_port + i) !=
-                   IB_LINK_LAYER_INFINIBAND)
+               if (!rdma_cap_ib_mcast(device, dev->start_port + i))
                        continue;
                port = &dev->port[i];
                port->dev = dev;
@@ -863,8 +858,7 @@ static void mcast_remove_one(struct ib_device *device)
        flush_workqueue(mcast_wq);
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-               if (rdma_port_get_link_layer(device, dev->start_port + i) ==
-                   IB_LINK_LAYER_INFINIBAND) {
+               if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
                        port = &dev->port[i];
                        deref_port(port);
                        wait_for_completion(&port->comp);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index c38f030..7f7c8c9 100644
@@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                struct ib_sa_port *port =
                        &sa_dev->port[event->element.port_num - sa_dev->start_port];
 
-               if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+               if (WARN_ON(!rdma_cap_ib_sa(handler->device, port->port_num)))
                        return;
 
                spin_lock_irqsave(&port->ah_lock, flags);
@@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;
 
-       force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+       force_grh = rdma_cap_eth_ah(device, port_num);
 
        if (rec->hop_limit > 1 || force_grh) {
                ah_attr->ah_flags = IB_AH_GRH;
@@ -1153,9 +1153,7 @@ static void ib_sa_add_one(struct ib_device *device)
 {
        struct ib_sa_device *sa_dev;
        int s, e, i;
-
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
+       int count = 0;
 
        if (device->node_type == RDMA_NODE_IB_SWITCH)
                s = e = 0;
@@ -1175,7 +1173,7 @@ static void ib_sa_add_one(struct ib_device *device)
 
        for (i = 0; i <= e - s; ++i) {
                spin_lock_init(&sa_dev->port[i].ah_lock);
-               if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+               if (!rdma_cap_ib_sa(device, i + 1))
                        continue;
 
                sa_dev->port[i].sm_ah    = NULL;
@@ -1189,8 +1187,13 @@ static void ib_sa_add_one(struct ib_device *device)
                        goto err;
 
                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
+
+               count++;
        }
 
+       if (!count)
+               goto free;
+
        ib_set_client_data(device, &sa_client, sa_dev);
 
        /*
@@ -1204,19 +1207,20 @@ static void ib_sa_add_one(struct ib_device *device)
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;
 
-       for (i = 0; i <= e - s; ++i)
-               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+       for (i = 0; i <= e - s; ++i) {
+               if (rdma_cap_ib_sa(device, i + 1))
                        update_sm_ah(&sa_dev->port[i].update_task);
+       }
 
        return;
 
 err:
-       while (--i >= 0)
-               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+       while (--i >= 0) {
+               if (rdma_cap_ib_sa(device, i + 1))
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
-
+       }
+free:
        kfree(sa_dev);
-
        return;
 }
 
@@ -1233,7 +1237,7 @@ static void ib_sa_remove_one(struct ib_device *device)
        flush_workqueue(ib_wq);
 
        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-               if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+               if (rdma_cap_ib_sa(device, i + 1)) {
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
                        if (sa_dev->port[i].sm_ah)
                                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 5855e44..e6c6810 100644
@@ -41,7 +41,7 @@
 
 /*
  * Fixup a directed route SMP for sending
- * Return 0 if the SMP should be discarded
+ * Return IB_SMI_DISCARD if the SMP should be discarded
  */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
                                       u8 node_type, int port_num)
@@ -126,7 +126,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 
 /*
  * Adjust information for a received SMP
- * Return 0 if the SMP should be dropped
+ * Return IB_SMI_DISCARD if the SMP should be dropped
  */
 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
                                       int port_num, int phys_port_cnt)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index cbd0383..d0334c1 100644
@@ -456,6 +456,7 @@ static void ib_device_release(struct device *device)
 {
        struct ib_device *dev = container_of(device, struct ib_device, dev);
 
+       kfree(dev->port_immutable);
        kfree(dev);
 }
 
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2f6393..62c24b1 100644
@@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device)
        dev_t base;
        struct ib_ucm_device *ucm_dev;
 
-       if (!device->alloc_ucontext ||
-           rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+       if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
                return;
 
        ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 45d67e9..ad45469 100644
@@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
-       switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-       case RDMA_TRANSPORT_IB:
-               switch (rdma_port_get_link_layer(ctx->cm_id->device,
-                       ctx->cm_id->port_num)) {
-               case IB_LINK_LAYER_INFINIBAND:
-                       ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-                       break;
-               case IB_LINK_LAYER_ETHERNET:
-                       ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
-                       break;
-               default:
-                       break;
-               }
-               break;
-       case RDMA_TRANSPORT_IWARP:
+
+       if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
+               ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+       else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
+               ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+       else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iw_route(&resp, &ctx->cm_id->route);
-               break;
-       default:
-               break;
-       }
 
 out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
index 928cdd2..e58d701 100644 (file)
@@ -99,7 +99,6 @@ struct ib_umad_port {
 };
 
 struct ib_umad_device {
-       int                  start_port, end_port;
        struct kobject       kobj;
        struct ib_umad_port  port[0];
 };
@@ -426,11 +425,11 @@ static int is_duplicate(struct ib_umad_file *file,
                 * the same TID, reject the second as a duplicate.  This is more
                 * restrictive than required by the spec.
                 */
-               if (!ib_response_mad((struct ib_mad *) hdr)) {
-                       if (!ib_response_mad((struct ib_mad *) sent_hdr))
+               if (!ib_response_mad(hdr)) {
+                       if (!ib_response_mad(sent_hdr))
                                return 1;
                        continue;
-               } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+               } else if (!ib_response_mad(sent_hdr))
                        continue;
 
                if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
@@ -1273,16 +1272,10 @@ static void ib_umad_add_one(struct ib_device *device)
 {
        struct ib_umad_device *umad_dev;
        int s, e, i;
+       int count = 0;
 
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
-
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
-               s = e = 0;
-       else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
+       s = rdma_start_port(device);
+       e = rdma_end_port(device);
 
        umad_dev = kzalloc(sizeof *umad_dev +
                           (e - s + 1) * sizeof (struct ib_umad_port),
@@ -1292,25 +1285,34 @@ static void ib_umad_add_one(struct ib_device *device)
 
        kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
 
-       umad_dev->start_port = s;
-       umad_dev->end_port   = e;
-
        for (i = s; i <= e; ++i) {
+               if (!rdma_cap_ib_mad(device, i))
+                       continue;
+
                umad_dev->port[i - s].umad_dev = umad_dev;
 
                if (ib_umad_init_port(device, i, umad_dev,
                                      &umad_dev->port[i - s]))
                        goto err;
+
+               count++;
        }
 
+       if (!count)
+               goto free;
+
        ib_set_client_data(device, &umad_client, umad_dev);
 
        return;
 
 err:
-       while (--i >= s)
-               ib_umad_kill_port(&umad_dev->port[i - s]);
+       while (--i >= s) {
+               if (!rdma_cap_ib_mad(device, i))
+                       continue;
 
+               ib_umad_kill_port(&umad_dev->port[i - s]);
+       }
+free:
        kobject_put(&umad_dev->kobj);
 }
 
@@ -1322,8 +1324,10 @@ static void ib_umad_remove_one(struct ib_device *device)
        if (!umad_dev)
                return;
 
-       for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
-               ib_umad_kill_port(&umad_dev->port[i]);
+       for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
+               if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
+                       ib_umad_kill_port(&umad_dev->port[i]);
+       }
 
        kobject_put(&umad_dev->kobj);
 }
index 4c01a34..685a362 100644 (file)
@@ -263,11 +263,9 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
        u32 flow_class;
        u16 gid_index;
        int ret;
-       int is_eth = (rdma_port_get_link_layer(device, port_num) ==
-                       IB_LINK_LAYER_ETHERNET);
 
        memset(ah_attr, 0, sizeof *ah_attr);
-       if (is_eth) {
+       if (rdma_cap_eth_ah(device, port_num)) {
                if (!(wc->wc_flags & IB_WC_GRH))
                        return -EPROTOTYPE;
 
@@ -936,7 +934,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
        union ib_gid  sgid;
 
        if ((*qp_attr_mask & IB_QP_AV)  &&
-           (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
+           (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
                ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
                                   qp_attr->ah_attr.grh.sgid_index, &sgid);
                if (ret)
index bdf3507..d396c39 100644 (file)
@@ -757,6 +757,23 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
        return netdev;
 }
 
+static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
+                            struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = c2_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+       return 0;
+}
+
 int c2_register_device(struct c2_dev *dev)
 {
        int ret = -ENOMEM;
@@ -820,6 +837,7 @@ int c2_register_device(struct c2_dev *dev)
        dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
        dev->ibdev.reg_user_mr = c2_reg_user_mr;
        dev->ibdev.dereg_mr = c2_dereg_mr;
+       dev->ibdev.get_port_immutable = c2_port_immutable;
 
        dev->ibdev.alloc_fmr = NULL;
        dev->ibdev.unmap_fmr = NULL;
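
The callback body above (copy pkey_tbl_len and gid_tbl_len from query_port(),
set RDMA_CORE_PORT_IWARP) recurs verbatim in each iWARP driver in this series;
only the driver-specific query_port() call differs. A hypothetical
consolidation, not part of this patch, would look like:

    static int iwarp_port_immutable(struct ib_device *ibdev, u8 port_num,
                                    struct ib_port_immutable *immutable,
                                    int (*query_port)(struct ib_device *, u8,
                                                      struct ib_port_attr *))
    {
            struct ib_port_attr attr;
            int err = query_port(ibdev, port_num, &attr);

            if (err)
                    return err;

            immutable->pkey_tbl_len = attr.pkey_tbl_len;
            immutable->gid_tbl_len = attr.gid_tbl_len;
            immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
            return 0;
    }
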
index 811b24a..061ef08 100644 (file)
@@ -1343,6 +1343,23 @@ static struct device_attribute *iwch_class_attributes[] = {
        &dev_attr_board_id,
 };
 
+static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
+                              struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = iwch_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+       return 0;
+}
+
 int iwch_register_device(struct iwch_dev *dev)
 {
        int ret;
@@ -1420,6 +1437,7 @@ int iwch_register_device(struct iwch_dev *dev)
        dev->ibdev.post_recv = iwch_post_receive;
        dev->ibdev.get_protocol_stats = iwch_get_mib;
        dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
+       dev->ibdev.get_port_immutable = iwch_port_immutable;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index 66bd6a2..ef08a9f 100644 (file)
@@ -465,6 +465,23 @@ static struct device_attribute *c4iw_class_attributes[] = {
        &dev_attr_board_id,
 };
 
+static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+                              struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = c4iw_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+       return 0;
+}
+
 int c4iw_register_device(struct c4iw_dev *dev)
 {
        int ret;
@@ -542,6 +559,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
+       dev->ibdev.get_port_immutable = c4iw_port_immutable;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index 22f79af..077185b 100644 (file)
@@ -49,6 +49,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
 int ehca_query_port(struct ib_device *ibdev, u8 port,
                    struct ib_port_attr *props);
 
+enum rdma_protocol_type
+ehca_query_protocol(struct ib_device *device, u8 port_num);
+
 int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
                        struct ehca_sma_attr *attr);
 
index cd8d290..5e30b72 100644 (file)
@@ -431,6 +431,23 @@ init_node_guid1:
        return ret;
 }
 
+static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
+                              struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = ehca_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+       return 0;
+}
+
 static int ehca_init_device(struct ehca_shca *shca)
 {
        int ret;
@@ -510,6 +527,7 @@ static int ehca_init_device(struct ehca_shca *shca)
        shca->ib_device.process_mad         = ehca_process_mad;
        shca->ib_device.mmap                = ehca_mmap;
        shca->ib_device.dma_ops             = &ehca_dma_mapping_ops;
+       shca->ib_device.get_port_immutable  = ehca_port_immutable;
 
        if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
                shca->ib_device.uverbs_cmd_mask |=
index 44ea939..764081d 100644 (file)
@@ -1980,6 +1980,23 @@ static int disable_timer(struct ipath_devdata *dd)
        return 0;
 }
 
+static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
+                               struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = ipath_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+       return 0;
+}
+
 /**
  * ipath_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
@@ -2179,6 +2196,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
        dev->process_mad = ipath_process_mad;
        dev->mmap = ipath_mmap;
        dev->dma_ops = &ipath_dma_mapping_ops;
+       dev->get_port_immutable = ipath_port_immutable;
 
        snprintf(dev->node_desc, sizeof(dev->node_desc),
                 IPATH_IDSTR " %s", init_utsname()->nodename);
index cc64400..c49dd0b 100644 (file)
@@ -2114,6 +2114,27 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
        kfree(ibdev->eq_table);
 }
 
+static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
+                              struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = mlx4_ib_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+
+       if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
+               immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+       else
+               immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+
+       return 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
        struct mlx4_ib_dev *ibdev;
@@ -2241,6 +2262,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
        ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
+       ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
 
        if (!mlx4_is_slave(ibdev->dev)) {
                ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
index 57c9809..b2fdb9c 100644 (file)
@@ -1182,6 +1182,23 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
        mlx5_ib_dealloc_pd(devr->p0);
 }
 
+static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+                              struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = mlx5_ib_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+       return 0;
+}
+
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
        struct mlx5_ib_dev *dev;
@@ -1285,6 +1302,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
+       dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
 
        mlx5_ib_internal_query_odp_caps(dev);
 
index 415f8e1..509d59e 100644 (file)
@@ -1244,6 +1244,23 @@ out:
        return err;
 }
 
+static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
+                               struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = mthca_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+       return 0;
+}
+
 int mthca_register_device(struct mthca_dev *dev)
 {
        int ret;
@@ -1323,6 +1340,7 @@ int mthca_register_device(struct mthca_dev *dev)
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;
+       dev->ib_dev.get_port_immutable   = mthca_port_immutable;
 
        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
index c0d0296..05530e3 100644 (file)
@@ -606,7 +606,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        return 0;
 }
 
-
 /**
  * nes_query_pkey
  */
@@ -3828,6 +3827,22 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_
        return 0;
 }
 
+static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
+                             struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = nes_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+       return 0;
+}
 
 /**
  * nes_init_ofa_device
@@ -3928,6 +3943,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
        nesibdev->ibdev.iwcm->reject = nes_reject;
        nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
        nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
+       nesibdev->ibdev.get_port_immutable   = nes_port_immutable;
 
        return nesibdev;
 }
index 7a2b59a..f552898 100644 (file)
@@ -202,6 +202,23 @@ static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
        return IB_LINK_LAYER_ETHERNET;
 }
 
+static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+                                struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = ocrdma_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+
+       return 0;
+}
+
 static int ocrdma_register_device(struct ocrdma_dev *dev)
 {
        strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
@@ -286,6 +303,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
        dev->ibdev.dma_device = &dev->nic_info.pdev->dev;
 
        dev->ibdev.process_mad = ocrdma_process_mad;
+       dev->ibdev.get_port_immutable = ocrdma_port_immutable;
 
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                dev->ibdev.uverbs_cmd_mask |=
index b8f7853..3cdc81e 100644 (file)
@@ -41,6 +41,9 @@ int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
 int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
                       struct ib_port_modify *props);
 
+enum rdma_protocol_type
+ocrdma_query_protocol(struct ib_device *device, u8 port_num);
+
 void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
 int ocrdma_query_gid(struct ib_device *, u8 port,
                     int index, union ib_gid *gid);
index 4a35998..dba1c92 100644 (file)
@@ -2040,6 +2040,23 @@ static void init_ibport(struct qib_pportdata *ppd)
        RCU_INIT_POINTER(ibp->qp1, NULL);
 }
 
+static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
+                             struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = qib_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+
+       return 0;
+}
+
 /**
  * qib_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
@@ -2227,6 +2244,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        ibdev->process_mad = qib_process_mad;
        ibdev->mmap = qib_mmap;
        ibdev->dma_ops = &qib_dma_mapping_ops;
+       ibdev->get_port_immutable = qib_port_immutable;
 
        snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
                 "Intel Infiniband HCA %s", init_utsname()->nodename);
index 0d0f986..34c49b8 100644 (file)
@@ -300,6 +300,22 @@ static struct notifier_block usnic_ib_inetaddr_notifier = {
 };
 /* End of inet section*/
 
+static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
+                               struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = usnic_ib_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+
+       return 0;
+}
+
 /* Start of PF discovery section */
 static void *usnic_ib_device_add(struct pci_dev *dev)
 {
@@ -383,6 +399,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
        us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
        us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
        us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
+       us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
 
 
        if (ib_register_device(&us_ibdev->ib_dev, NULL))
index bb864f5..57ddba5 100644 (file)
@@ -27,6 +27,8 @@ int usnic_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props);
 int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
                                struct ib_port_attr *props);
+enum rdma_protocol_type
+usnic_ib_query_protocol(struct ib_device *device, u8 port_num);
 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr);
index 9e1b203..3421e42 100644 (file)
@@ -1685,9 +1685,7 @@ static void ipoib_add_one(struct ib_device *device)
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;
-
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
+       int count = 0;
 
        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
@@ -1704,15 +1702,21 @@ static void ipoib_add_one(struct ib_device *device)
        }
 
        for (p = s; p <= e; ++p) {
-               if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
+               if (!rdma_protocol_ib(device, p))
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
+                       count++;
                }
        }
 
+       if (!count) {
+               kfree(dev_list);
+               return;
+       }
+
        ib_set_client_data(device, &ipoib_client, dev_list);
 }
 
@@ -1721,9 +1725,6 @@ static void ipoib_remove_one(struct ib_device *device)
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;
 
-       if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
-               return;
-
        dev_list = ib_get_client_data(device, &ipoib_client);
        if (!dev_list)
                return;
index 667df9d..c3f654d 100644 (file)
 #define DRV_RELDATE    "July 1, 2013"
 
 MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
-                  "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
 MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_INFO(release_date, DRV_RELDATE);
 
 static unsigned int srp_sg_tablesize;
 static unsigned int cmd_sg_entries;
@@ -466,14 +467,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
  */
 static void srp_destroy_qp(struct srp_rdma_ch *ch)
 {
-       struct srp_target_port *target = ch->target;
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;
 
        /* Destroying a QP and reusing ch->done is only safe if not connected */
-       WARN_ON_ONCE(target->connected);
+       WARN_ON_ONCE(ch->connected);
 
        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
@@ -782,7 +782,7 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
-                            (unsigned long long) be64_to_cpu(target->ioc_guid));
+                            be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
@@ -812,35 +812,19 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
        return changed;
 }
 
-static bool srp_change_conn_state(struct srp_target_port *target,
-                                 bool connected)
-{
-       bool changed = false;
-
-       spin_lock_irq(&target->lock);
-       if (target->connected != connected) {
-               target->connected = connected;
-               changed = true;
-       }
-       spin_unlock_irq(&target->lock);
-
-       return changed;
-}
-
 static void srp_disconnect_target(struct srp_target_port *target)
 {
        struct srp_rdma_ch *ch;
        int i;
 
-       if (srp_change_conn_state(target, false)) {
-               /* XXX should send SRP_I_LOGOUT request */
+       /* XXX should send SRP_I_LOGOUT request */
 
-               for (i = 0; i < target->ch_count; i++) {
-                       ch = &target->ch[i];
-                       if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
-                               shost_printk(KERN_DEBUG, target->scsi_host,
-                                            PFX "Sending CM DREQ failed\n");
-                       }
+       for (i = 0; i < target->ch_count; i++) {
+               ch = &target->ch[i];
+               ch->connected = false;
+               if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+                       shost_printk(KERN_DEBUG, target->scsi_host,
+                                    PFX "Sending CM DREQ failed\n");
                }
        }
 }
@@ -853,7 +837,7 @@ static void srp_free_req_data(struct srp_target_port *target,
        struct srp_request *req;
        int i;
 
-       if (!ch->target || !ch->req_ring)
+       if (!ch->req_ring)
                return;
 
        for (i = 0; i < target->req_ring_size; ++i) {
@@ -987,14 +971,26 @@ static void srp_rport_delete(struct srp_rport *rport)
        srp_queue_remove_work(target);
 }
 
+/**
+ * srp_connected_ch() - number of connected channels
+ * @target: SRP target port.
+ */
+static int srp_connected_ch(struct srp_target_port *target)
+{
+       int i, c = 0;
+
+       for (i = 0; i < target->ch_count; i++)
+               c += target->ch[i].connected;
+
+       return c;
+}
+
 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 {
        struct srp_target_port *target = ch->target;
        int ret;
 
-       WARN_ON_ONCE(!multich && target->connected);
-
-       target->qp_in_error = false;
+       WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
 
        ret = srp_lookup_path(ch);
        if (ret)
@@ -1017,7 +1013,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
                 */
                switch (ch->status) {
                case 0:
-                       srp_change_conn_state(target, true);
+                       ch->connected = true;
                        return 0;
 
                case SRP_PORT_REDIRECT:
@@ -1215,14 +1211,10 @@ static int srp_rport_reconnect(struct srp_rport *rport)
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               if (!ch->target)
-                       break;
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               if (!ch->target)
-                       break;
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];
 
@@ -1231,8 +1223,6 @@ static int srp_rport_reconnect(struct srp_rport *rport)
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               if (!ch->target)
-                       break;
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
@@ -1244,13 +1234,13 @@ static int srp_rport_reconnect(struct srp_rport *rport)
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }
+
+       target->qp_in_error = false;
+
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               if (ret || !ch->target) {
-                       if (i > 1)
-                               ret = 0;
+               if (ret)
                        break;
-               }
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }
@@ -1843,7 +1833,7 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch,
        s32 delta = be32_to_cpu(req->req_lim_delta);
 
        shost_printk(KERN_ERR, target->scsi_host, PFX
-                    "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+                    "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
 
        if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
                shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1930,7 +1920,7 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
                return;
        }
 
-       if (target->connected && !target->qp_in_error) {
+       if (ch->connected && !target->qp_in_error) {
                if (wr_id & LOCAL_INV_WR_ID_MASK) {
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "LOCAL_INV failed with status %s (%d)\n",
@@ -2036,7 +2026,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
        memset(cmd, 0, sizeof *cmd);
 
        cmd->opcode = SRP_CMD;
-       cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
+       int_to_scsilun(scmnd->device->lun, &cmd->lun);
        cmd->tag    = tag;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
 
@@ -2369,7 +2359,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
        case IB_CM_DREQ_RECEIVED:
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "DREQ received - connection closed\n");
-               srp_change_conn_state(target, false);
+               ch->connected = false;
                if (ib_send_cm_drep(cm_id, NULL, 0))
                        shost_printk(KERN_ERR, target->scsi_host,
                                     PFX "Sending CM DREP failed\n");
@@ -2416,8 +2406,8 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
        return scsi_change_queue_depth(sdev, qdepth);
 }
 
-static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
-                            unsigned int lun, u8 func)
+static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
+                            u8 func)
 {
        struct srp_target_port *target = ch->target;
        struct srp_rport *rport = target->rport;
@@ -2425,7 +2415,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
 
-       if (!target->connected || target->qp_in_error)
+       if (!ch->connected || target->qp_in_error)
                return -1;
 
        init_completion(&ch->tsk_mgmt_done);
@@ -2451,7 +2441,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
        memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
        tsk_mgmt->opcode        = SRP_TSK_MGMT;
-       tsk_mgmt->lun           = cpu_to_be64((u64) lun << 48);
+       int_to_scsilun(lun, &tsk_mgmt->lun);
        tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req_tag;
@@ -2565,8 +2555,7 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
 {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-       return sprintf(buf, "0x%016llx\n",
-                      (unsigned long long) be64_to_cpu(target->id_ext));
+       return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
 }
 
 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
@@ -2574,8 +2563,7 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
 {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-       return sprintf(buf, "0x%016llx\n",
-                      (unsigned long long) be64_to_cpu(target->ioc_guid));
+       return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
 }
 
 static ssize_t show_service_id(struct device *dev,
@@ -2583,8 +2571,7 @@ static ssize_t show_service_id(struct device *dev,
 {
        struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-       return sprintf(buf, "0x%016llx\n",
-                      (unsigned long long) be64_to_cpu(target->service_id));
+       return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
 }
 
 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2775,7 +2762,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
 
        target->state = SRP_TARGET_SCANNING;
        sprintf(target->target_name, "SRP.T10:%016llX",
-                (unsigned long long) be64_to_cpu(target->id_ext));
+               be64_to_cpu(target->id_ext));
 
        if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
                return -ENODEV;
@@ -2799,7 +2786,8 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
        scsi_scan_target(&target->scsi_host->shost_gendev,
                         0, target->scsi_id, SCAN_WILD_CARD, 0);
 
-       if (!target->connected || target->qp_in_error) {
+       if (srp_connected_ch(target) < target->ch_count ||
+           target->qp_in_error) {
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "SCSI scan failed - removing SCSI host\n");
                srp_queue_remove_work(target);
@@ -3148,7 +3136,7 @@ static ssize_t srp_create_target(struct device *dev,
        target_host->transportt  = ib_srp_transport_template;
        target_host->max_channel = 0;
        target_host->max_id      = 1;
-       target_host->max_lun     = SRP_MAX_LUN;
+       target_host->max_lun     = -1LL;
        target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
        target = host_to_target(target_host);
@@ -3174,11 +3162,11 @@ static ssize_t srp_create_target(struct device *dev,
 
        ret = srp_parse_options(buf, target);
        if (ret)
-               goto err;
+               goto out;
 
        ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
        if (ret)
-               goto err;
+               goto out;
 
        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
 
@@ -3189,7 +3177,7 @@ static ssize_t srp_create_target(struct device *dev,
                             be64_to_cpu(target->ioc_guid),
                             be64_to_cpu(target->initiator_ext));
                ret = -EEXIST;
-               goto err;
+               goto out;
        }
 
        if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
@@ -3210,7 +3198,7 @@ static ssize_t srp_create_target(struct device *dev,
        spin_lock_init(&target->lock);
        ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
        if (ret)
-               goto err;
+               goto out;
 
        ret = -ENOMEM;
        target->ch_count = max_t(unsigned, num_online_nodes(),
@@ -3221,7 +3209,7 @@ static ssize_t srp_create_target(struct device *dev,
        target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
                             GFP_KERNEL);
        if (!target->ch)
-               goto err;
+               goto out;
 
        node_idx = 0;
        for_each_online_node(node) {
@@ -3317,9 +3305,6 @@ err_disconnect:
        }
 
        kfree(target->ch);
-
-err:
-       scsi_host_put(target_host);
        goto out;
 }
 
index a611556..17ee3f8 100644 (file)
@@ -54,7 +54,6 @@ enum {
        SRP_DLID_REDIRECT       = 2,
        SRP_STALE_CONN          = 3,
 
-       SRP_MAX_LUN             = 512,
        SRP_DEF_SG_TABLESIZE    = 12,
 
        SRP_DEFAULT_QUEUE_SIZE  = 1 << 6,
@@ -170,6 +169,7 @@ struct srp_rdma_ch {
 
        struct completion       tsk_mgmt_done;
        u8                      tsk_mgmt_status;
+       bool                    connected;
 };
 
 /**
@@ -214,7 +214,6 @@ struct srp_target_port {
        __be16                  pkey;
 
        u32                     rq_tmo_jiffies;
-       bool                    connected;
 
        int                     zero_req_lim;
 
index acea5d6..6a41c36 100644 (file)
@@ -1053,7 +1053,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
        memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
        srp_cmd->opcode = SRP_CMD;
        memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
-       srp_cmd->lun = cpu_to_be64(((u64)lun) << 48);
+       int_to_scsilun(lun, &srp_cmd->lun);
 
        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
@@ -1529,7 +1529,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
                /* Set up an abort SRP command */
                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
                tsk_mgmt->opcode = SRP_TSK_MGMT;
-               tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
+               int_to_scsilun(lun, &tsk_mgmt->lun);
                tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
                tsk_mgmt->task_tag = (u64) found_evt;
 
@@ -1652,7 +1652,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
                /* Set up a lun reset SRP command */
                memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
                tsk_mgmt->opcode = SRP_TSK_MGMT;
-               tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48);
+               int_to_scsilun(lun, &tsk_mgmt->lun);
                tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
                evt->sync_srp = &srp_rsp;
index ae45bd9..a85292b 100644 (file)
@@ -61,6 +61,11 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
        return dev_to_shost(r->dev.parent);
 }
 
+static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
+{
+       return transport_class_to_srp_rport(&shost->shost_gendev);
+}
+
 /**
  * srp_tmo_valid() - check timeout combination validity
  * @reconnect_delay: Reconnect delay in seconds.
@@ -396,6 +401,36 @@ static void srp_reconnect_work(struct work_struct *work)
        }
 }
 
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
+ *
+ * To do: add support for scsi-mq in this function.
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+       struct scsi_device *sdev;
+       struct request_queue *q;
+       int request_fn_active = 0;
+
+       shost_for_each_device(sdev, shost) {
+               q = sdev->request_queue;
+
+               spin_lock_irq(q->queue_lock);
+               request_fn_active += q->request_fn_active;
+               spin_unlock_irq(q->queue_lock);
+       }
+
+       return request_fn_active;
+}
+
+/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
+static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
+{
+       while (scsi_request_fn_active(shost))
+               msleep(20);
+}
+
 static void __rport_fail_io_fast(struct srp_rport *rport)
 {
        struct Scsi_Host *shost = rport_to_shost(rport);
@@ -409,8 +444,10 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
 
        /* Involve the LLD if possible to terminate all I/O on the rport. */
        i = to_srp_internal(shost->transportt);
-       if (i->f->terminate_rport_io)
+       if (i->f->terminate_rport_io) {
+               srp_wait_for_queuecommand(shost);
                i->f->terminate_rport_io(rport);
+       }
 }
 
 /**
@@ -503,27 +540,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport)
 }
 EXPORT_SYMBOL(srp_start_tl_fail_timers);
 
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
-       struct scsi_device *sdev;
-       struct request_queue *q;
-       int request_fn_active = 0;
-
-       shost_for_each_device(sdev, shost) {
-               q = sdev->request_queue;
-
-               spin_lock_irq(q->queue_lock);
-               request_fn_active += q->request_fn_active;
-               spin_unlock_irq(q->queue_lock);
-       }
-
-       return request_fn_active;
-}
-
 /**
  * srp_reconnect_rport() - reconnect to an SRP target port
  * @rport: SRP target port.
@@ -559,8 +575,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
        if (res)
                goto out;
        scsi_target_block(&shost->shost_gendev);
-       while (scsi_request_fn_active(shost))
-               msleep(20);
+       srp_wait_for_queuecommand(shost);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
@@ -618,9 +633,11 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct srp_internal *i = to_srp_internal(shost->transportt);
+       struct srp_rport *rport = shost_to_rport(shost);
 
        pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
-       return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+       return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
+               i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
                BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
 }
 
index 9bb99e9..c0ea51f 100644 (file)
@@ -263,7 +263,7 @@ struct ib_mad_send_buf {
  * ib_response_mad - Returns whether the specified MAD was generated in
  *   response to a sent request or trap.
  */
-int ib_response_mad(struct ib_mad *mad);
+int ib_response_mad(const struct ib_mad_hdr *hdr);
 
 /**
  * ib_get_rmpp_resptime - Returns the RMPP response time.
@@ -675,6 +675,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
  * @agent: the agent in question
  * @return: true if agent is performing rmpp, false otherwise.
  */
-int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
 
 #endif /* IB_MAD_H */
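
Taking a const struct ib_mad_hdr * instead of a struct ib_mad * is what lets
is_duplicate() above drop its casts: callers pass whichever header pointer
they already hold. A minimal caller sketch (struct ib_mad begins with its
struct ib_mad_hdr):

    #include <rdma/ib_mad.h>

    static bool example_is_response(const struct ib_mad *mad)
    {
            return ib_response_mad(&mad->mad_hdr);
    }
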
index 672fc8f..ad499bd 100644 (file)
@@ -81,6 +81,13 @@ enum rdma_transport_type {
        RDMA_TRANSPORT_USNIC_UDP
 };
 
+enum rdma_protocol_type {
+       RDMA_PROTOCOL_IB,
+       RDMA_PROTOCOL_IBOE,
+       RDMA_PROTOCOL_IWARP,
+       RDMA_PROTOCOL_USNIC_UDP
+};
+
 __attribute_const__ enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type);
 
@@ -346,6 +353,40 @@ union rdma_protocol_stats {
        struct iw_protocol_stats        iw;
 };
 
+/* Define bits for the various kinds of functionality a port may need the
+ * core to support.
+ */
+/* Management                           0x00000FFF */
+#define RDMA_CORE_CAP_IB_MAD            0x00000001
+#define RDMA_CORE_CAP_IB_SMI            0x00000002
+#define RDMA_CORE_CAP_IB_CM             0x00000004
+#define RDMA_CORE_CAP_IW_CM             0x00000008
+#define RDMA_CORE_CAP_IB_SA             0x00000010
+
+/* Address format                       0x000FF000 */
+#define RDMA_CORE_CAP_AF_IB             0x00001000
+#define RDMA_CORE_CAP_ETH_AH            0x00002000
+
+/* Protocol                             0xFFF00000 */
+#define RDMA_CORE_CAP_PROT_IB           0x00100000
+#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
+#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
+
+#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
+                                       | RDMA_CORE_CAP_IB_MAD \
+                                       | RDMA_CORE_CAP_IB_SMI \
+                                       | RDMA_CORE_CAP_IB_CM  \
+                                       | RDMA_CORE_CAP_IB_SA  \
+                                       | RDMA_CORE_CAP_AF_IB)
+#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
+                                       | RDMA_CORE_CAP_IB_MAD  \
+                                       | RDMA_CORE_CAP_IB_CM   \
+                                       | RDMA_CORE_CAP_IB_SA   \
+                                       | RDMA_CORE_CAP_AF_IB   \
+                                       | RDMA_CORE_CAP_ETH_AH)
+#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
+                                       | RDMA_CORE_CAP_IW_CM)
+
 struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
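
The three commented ranges (management 0x00000FFF, address format 0x000FF000,
protocol 0xFFF00000) are a layout contract that future capability bits must
honor. A compile-time check sketch, not part of the patch (BUILD_BUG_ON needs
function scope, e.g. an init routine):

    static inline void example_check_core_cap_ranges(void)
    {
            BUILD_BUG_ON((RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI |
                          RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IW_CM |
                          RDMA_CORE_CAP_IB_SA) & ~0x00000FFF);
            BUILD_BUG_ON((RDMA_CORE_CAP_AF_IB |
                          RDMA_CORE_CAP_ETH_AH) & ~0x000FF000);
            BUILD_BUG_ON((RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE |
                          RDMA_CORE_CAP_PROT_IWARP) & ~0xFFF00000);
    }
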
@@ -1478,6 +1519,12 @@ struct ib_dma_mapping_ops {
 
 struct iw_cm_verbs;
 
+struct ib_port_immutable {
+       int                           pkey_tbl_len;
+       int                           gid_tbl_len;
+       u32                           core_cap_flags;
+};
+
 struct ib_device {
        struct device                *dma_device;
 
@@ -1491,8 +1538,10 @@ struct ib_device {
        struct list_head              client_data_list;
 
        struct ib_cache               cache;
-       int                          *pkey_tbl_len;
-       int                          *gid_tbl_len;
+       /**
+        * port_immutable is indexed by port number
+        */
+       struct ib_port_immutable     *port_immutable;
 
        int                           num_comp_vectors;
 
@@ -1679,6 +1728,14 @@ struct ib_device {
        u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
+
+       /**
+        * The following mandatory functions are used only at device
+        * registration.  Keep functions such as these at the end of this
+        * structure to avoid cache line misses when accessing struct ib_device
+        * in fast paths.
+        */
+       int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
 };
 
 struct ib_client {
@@ -1747,6 +1804,242 @@ int ib_query_port(struct ib_device *device,
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);
 
+/**
+ * rdma_start_port - Return the first valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return start port number
+ */
+static inline u8 rdma_start_port(const struct ib_device *device)
+{
+       return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+}
+
+/**
+ * rdma_end_port - Return the last valid port number for the device
+ * specified
+ *
+ * @device: Device to be checked
+ *
+ * Return last port number
+ */
+static inline u8 rdma_end_port(const struct ib_device *device)
+{
+       return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+               0 : device->phys_port_cnt;
+}
+
+static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
+}
+
+static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
+}
+
+static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
+}
+
+static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags &
+               (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
+}
+
+/**
+ * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
+ * Management Datagrams.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Management Datagrams (MAD) are a required part of the InfiniBand
+ * specification and are supported on all InfiniBand devices.  A slightly
+ * extended version is also supported on OPA interfaces.
+ *
+ * Return: true if the port supports sending/receiving of MAD packets.
+ */
+static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
+}
+
+/**
+ * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
+ * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Each InfiniBand node is required to provide a Subnet Management Agent
+ * that the subnet manager can access.  Prior to the fabric being fully
+ * configured by the subnet manager, the SMA is accessed via a well known
+ * interface called the Subnet Management Interface (SMI).  This interface
+ * uses directed route packets to communicate with the SM to get around the
+ * chicken and egg problem of the SM needing to know what's on the fabric
+ * in order to configure the fabric, and needing to configure the fabric in
+ * order to send packets to the devices on the fabric.  These directed
+ * route packets do not need the fabric fully configured in order to reach
+ * their destination.  The SMI is the only method allowed to send
+ * directed route packets on an InfiniBand fabric.
+ *
+ * Return: true if the port provides an SMI.
+ */
+static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
+}
+
+/**
+ * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * The InfiniBand Communication Manager is one of many pre-defined General
+ * Service Agents (GSA) that are accessed via the General Service
+ * Interface (GSI).  Its role is to facilitate the establishment of connections
+ * between nodes as well as other management-related tasks for established
+ * connections.
+ *
+ * Return: true if the port supports an IB CM (though this does not
+ * guarantee that a CM is actually running).
+ */
+static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
+}
+
+/**
+ * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
+ * Communication Manager.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Similar to rdma_cap_ib_cm(), but specific to iWARP connections, which use
+ * a different management protocol than InfiniBand.
+ *
+ * Return: true if the port supports an iWARP CM (though this does not
+ * guarantee that a CM is actually running).
+ */
+static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
+}
+
+/**
+ * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
+ * Subnet Administration.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * An InfiniBand Subnet Administration (SA) service is a pre-defined General
+ * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
+ * fabrics, devices should resolve routes to other hosts by contacting the
+ * SA to query the proper route.
+ *
+ * Return: true if the port should act as a client to the fabric Subnet
+ * Administration interface.  This does not imply that the SA service is
+ * running locally.
+ */
+static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
+}
+
+/**
+ * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
+ * Multicast.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand multicast registration is more complex than normal IPv4 or
+ * IPv6 multicast registration.  Each Host Channel Adapter must register
+ * with the Subnet Manager when it wishes to join a multicast group.  It
+ * should do so only once regardless of how many queue pairs it attaches
+ * to that group, and it should leave the group only after all queue pairs
+ * attached to the group have been detached.
+ *
+ * Return: true if the port must undertake the additional administrative
+ * overhead of registering/unregistering with the SM and tracking of the
+ * total number of queue pairs attached to the multicast group.
+ */
+static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num)
+{
+       return rdma_cap_ib_sa(device, port_num);
+}
+
+/**
+ * rdma_cap_af_ib - Check if the port of a device supports native
+ * InfiniBand addressing.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
+ * GID.  RoCE uses a different mechanism, but still generates a GID in a
+ * prescribed way from port-specific data.
+ *
+ * Return: true if the port uses a GID address to identify devices on the
+ * network.
+ */
+static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
+}
+
+/**
+ * rdma_cap_eth_ah - Check if the port of a device supports
+ * Ethernet Address Handles.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * RoCE is InfiniBand over Ethernet, and it uses a well-defined technique
+ * to fabricate GIDs from the Ethernet/IP-specific addresses native to the
+ * port.  Normally, packet headers are generated by the sending host
+ * adapter, but when sending connectionless datagrams, we must manually
+ * inject the proper headers for the fabric we are communicating over.
+ *
+ * Return: true if we are running as a RoCE port and must force the
+ * addition of a Global Route Header built from our Ethernet Address
+ * Handle into our header list for connectionless packets.
+ */
+static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num)
+{
+       return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
+}
+
+/**
+ * rdma_cap_read_multi_sge - Check if the port of a device supports
+ * multiple scatter-gather entries per RDMA READ.
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * iWARP has a restriction that RDMA READ requests may only have a single
+ * Scatter/Gather Entry (SGE) in the work request.
+ *
+ * NOTE: although the Linux kernel currently assumes all devices are either
+ * single-SGE RDMA READ devices or have identical SGE maximums for RDMA READs
+ * and WRITEs, according to Tom Talpey this is not accurate.  There are some
+ * devices out there that support more than a single SGE on RDMA READ
+ * requests, but do not support the same number of SGEs as they do on
+ * RDMA WRITE requests.  The Linux kernel would need rearchitecting to
+ * support such imbalanced READ/WRITE SGE devices.  So, for now, we assume
+ * that a device either supports the same number of READ and WRITE SGEs,
+ * or gets only a single READ SGE.
+ *
+ * Return: true for any device that allows more than one SGE in RDMA READ
+ * requests.
+ */
+static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
+                                          u8 port_num)
+{
+       return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
+}
+
 int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);
 
index 1ae84db..5be834d 100644 (file)
@@ -42,6 +42,7 @@
  */
 
 #include <linux/types.h>
+#include <scsi/scsi.h>
 
 enum {
        SRP_LOGIN_REQ   = 0x00,
@@ -179,7 +180,7 @@ struct srp_tsk_mgmt {
        u8      reserved1[6];
        u64     tag;
        u8      reserved2[4];
-       __be64  lun __attribute__((packed));
+       struct scsi_lun lun;
        u8      reserved3[2];
        u8      tsk_mgmt_func;
        u8      reserved4;
@@ -200,7 +201,7 @@ struct srp_cmd {
        u8      data_in_desc_cnt;
        u64     tag;
        u8      reserved2[4];
-       __be64  lun __attribute__((packed));
+       struct scsi_lun lun;
        u8      reserved3;
        u8      task_attr;
        u8      reserved4;
@@ -265,7 +266,7 @@ struct srp_aer_req {
        __be32  req_lim_delta;
        u64     tag;
        u32     reserved2;
-       __be64  lun;
+       struct scsi_lun lun;
        __be32  sense_data_len;
        u32     reserved3;
        u8      sense_data[0];
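
Replacing the open-coded __be64 with struct scsi_lun delegates LUN encoding to
the existing SCSI core helpers: int_to_scsilun() produces the full 8-byte
SAM-format LUN, which the removed "cpu_to_be64(lun << 48)" pattern only got
right for small, single-level LUNs, and scsilun_to_int() reverses it. A
round-trip sketch using those helpers:

    #include <scsi/scsi.h>

    static void example_lun_roundtrip(void)
    {
            struct scsi_lun wire;
            u64 lun = 5;

            int_to_scsilun(lun, &wire);             /* encode SAM-format LUN */
            WARN_ON(scsilun_to_int(&wire) != lun);  /* decodes back to 5 */
    }
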
index f9f13a3..86b4416 100644 (file)
@@ -117,8 +117,8 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 
 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
 {
-       if (rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
-            RDMA_TRANSPORT_IWARP)
+       if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device,
+                                    xprt->sc_cm_id->port_num))
                return 1;
        else
                return min_t(int, sge_count, xprt->sc_max_sge);
index 13ee04f..88eb994 100644 (file)
@@ -858,7 +858,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
-       int need_dma_mr;
+       int need_dma_mr = 0;
        int ret;
        int i;
 
@@ -992,35 +992,26 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
-       switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
-       case RDMA_TRANSPORT_IWARP:
-               newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
-               if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc =
-                               (IB_ACCESS_LOCAL_WRITE |
-                                IB_ACCESS_REMOTE_WRITE);
-               } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else
-                       need_dma_mr = 0;
-               break;
-       case RDMA_TRANSPORT_IB:
-               if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else if (!(devattr.device_cap_flags &
-                            IB_DEVICE_LOCAL_DMA_LKEY)) {
-                       need_dma_mr = 1;
-                       dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
-               } else
-                       need_dma_mr = 0;
-               break;
-       default:
+       if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                                newxprt->sc_cm_id->port_num) &&
+           !rdma_ib_or_roce(newxprt->sc_cm_id->device,
+                            newxprt->sc_cm_id->port_num))
                goto errout;
+
+       if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
+           !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
+               need_dma_mr = 1;
+               dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
+               if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                                       newxprt->sc_cm_id->port_num) &&
+                   !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
+                       dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
        }
 
+       if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
+                               newxprt->sc_cm_id->port_num))
+               newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
+
        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */