Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Jul 2008 15:16:48 +0000 (08:16 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Jul 2008 15:16:48 +0000 (08:16 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6:
  Revert "SELinux: allow fstype unknown to policy to use xattrs if present"

126 files changed:
drivers/infiniband/core/addr.c
drivers/infiniband/core/agent.h
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/mad_priv.h
drivers/infiniband/core/mad_rmpp.c
drivers/infiniband/core/mad_rmpp.h
drivers/infiniband/core/packer.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ud_header.c
drivers/infiniband/core/umem.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/amso1100/c2_rnic.c
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch.c
drivers/infiniband/hw/cxgb3/iwch.h
drivers/infiniband/hw/cxgb3/iwch_cq.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_provider.h
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_reqs.c
drivers/infiniband/hw/ehca/hcp_if.c
drivers/infiniband/hw/ehca/hcp_if.h
drivers/infiniband/hw/ipath/ipath_cq.c
drivers/infiniband/hw/ipath/ipath_iba7220.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_ruc.c
drivers/infiniband/hw/ipath/ipath_uc.c
drivers/infiniband/hw/ipath/ipath_ud.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mthca/mthca_allocator.c
drivers/infiniband/hw/mthca/mthca_av.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_cmd.h
drivers/infiniband/hw/mthca/mthca_config_reg.h
drivers/infiniband/hw/mthca/mthca_cq.c
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_doorbell.h
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/mthca/mthca_mad.c
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_mcg.c
drivers/infiniband/hw/mthca/mthca_memfree.c
drivers/infiniband/hw/mthca/mthca_memfree.h
drivers/infiniband/hw/mthca/mthca_mr.c
drivers/infiniband/hw/mthca/mthca_pd.c
drivers/infiniband/hw/mthca/mthca_profile.c
drivers/infiniband/hw/mthca/mthca_profile.h
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_provider.h
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/mthca/mthca_reset.c
drivers/infiniband/hw/mthca/mthca_srq.c
drivers/infiniband/hw/mthca/mthca_uar.c
drivers/infiniband/hw/mthca/mthca_user.h
drivers/infiniband/hw/mthca/mthca_wqe.h
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/ulp/ipoib/Kconfig
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_fs.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srp/ib_srp.h
drivers/net/cxgb3/cxgb3_ctl_defs.h
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb3/version.h
drivers/net/mlx4/fw.c
drivers/net/mlx4/fw.h
drivers/net/mlx4/main.c
drivers/net/mlx4/mcg.c
firmware/Makefile
include/asm-x86/dwarf2.h
include/linux/mlx4/device.h
include/rdma/ib_addr.h
include/rdma/ib_cache.h
include/rdma/ib_cm.h
include/rdma/ib_fmr_pool.h
include/rdma/ib_mad.h
include/rdma/ib_pack.h
include/rdma/ib_sa.h
include/rdma/ib_smi.h
include/rdma/ib_user_cm.h
include/rdma/ib_user_mad.h
include/rdma/ib_user_verbs.h
include/rdma/ib_verbs.h
include/rdma/iw_cm.h
include/rdma/rdma_cm.h
include/rdma/rdma_cm_ib.h

index 781ea59..09a2bec 100644 (file)
@@ -4,28 +4,33 @@
  * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  *
- * This Software is licensed under one of the following licenses:
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * 1) under the terms of the "Common Public License 1.0" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/cpl.php.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
  *
- * 2) under the terms of the "The BSD License" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/bsd-license.php.
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
  *
- * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
- *    copy of which is available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/gpl-license.php.
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
- * Licensee has the right to choose one of the above licenses.
- *
- * Redistributions of source code must retain the above copyright
- * notice and one of the license notices.
- *
- * Redistributions in binary form must reproduce both the above copyright
- * notice, one of the license notices in the documentation
- * and/or other materials provided with the distribution.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/mutex.h>
@@ -100,6 +105,7 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
        memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
        if (dst_dev_addr)
                memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
+       dev_addr->src_dev = dev;
        return 0;
 }
 EXPORT_SYMBOL(rdma_copy_addr);
index fb9ed14..6669287 100644 (file)
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: agent.h 1389 2004-12-27 22:56:47Z roland $
  */
 
 #ifndef __AGENT_H_
index e85f701..6888356 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/module.h>
index a47fe64..55738ee 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
  */
 
 #include <linux/completion.h>
index 671f137..ae11d5c 100644 (file)
@@ -4,29 +4,33 @@
  * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
  * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
  *
- * This Software is licensed under one of the following licenses:
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * 1) under the terms of the "Common Public License 1.0" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/cpl.php.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
  *
- * 2) under the terms of the "The BSD License" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/bsd-license.php.
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
  *
- * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
- *    copy of which is available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/gpl-license.php.
- *
- * Licensee has the right to choose one of the above licenses.
- *
- * Redistributions of source code must retain the above copyright
- * notice and one of the license notices.
- *
- * Redistributions in binary form must reproduce both the above copyright
- * notice, one of the license notices in the documentation
- * and/or other materials provided with the distribution.
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/completion.h>
@@ -126,8 +130,7 @@ struct rdma_id_private {
 
        struct completion       comp;
        atomic_t                refcount;
-       wait_queue_head_t       wait_remove;
-       atomic_t                dev_remove;
+       struct mutex            handler_mutex;
 
        int                     backlog;
        int                     timeout_ms;
@@ -351,26 +354,15 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
                complete(&id_priv->comp);
 }
 
-static int cma_disable_remove(struct rdma_id_private *id_priv,
+static int cma_disable_callback(struct rdma_id_private *id_priv,
                              enum cma_state state)
 {
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&id_priv->lock, flags);
-       if (id_priv->state == state) {
-               atomic_inc(&id_priv->dev_remove);
-               ret = 0;
-       } else
-               ret = -EINVAL;
-       spin_unlock_irqrestore(&id_priv->lock, flags);
-       return ret;
-}
-
-static void cma_enable_remove(struct rdma_id_private *id_priv)
-{
-       if (atomic_dec_and_test(&id_priv->dev_remove))
-               wake_up(&id_priv->wait_remove);
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != state) {
+               mutex_unlock(&id_priv->handler_mutex);
+               return -EINVAL;
+       }
+       return 0;
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
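
Note on the conversion above: cma_disable_callback() returns with handler_mutex held on success (and has already dropped it on the -EINVAL path), so every event handler that used to pair cma_disable_remove()/cma_enable_remove() now ends with an explicit mutex_unlock(). A minimal sketch of the resulting handler shape, with the handler body itself hypothetical:

static int example_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *evt)
{
        struct rdma_id_private *id_priv = cm_id->context;
        int ret = 0;

        if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;       /* wrong state; mutex was already dropped */

        /* ... deliver the event to the consumer, set ret accordingly ... */

        mutex_unlock(&id_priv->handler_mutex);  /* pairs with cma_disable_callback() */
        return ret;
}
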
@@ -395,8 +387,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
-       init_waitqueue_head(&id_priv->wait_remove);
-       atomic_set(&id_priv->dev_remove, 0);
+       mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -923,7 +914,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        struct rdma_cm_event event;
        int ret = 0;
 
-       if (cma_disable_remove(id_priv, CMA_CONNECT))
+       if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -970,7 +961,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
-               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }
@@ -980,12 +971,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
-               cma_enable_remove(id_priv);
+               mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
 out:
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
 
@@ -998,6 +989,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        union cma_ip_addr *src, *dst;
        __be16 port;
        u8 ip_ver;
+       int ret;
 
        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
@@ -1022,10 +1014,11 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
-       ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
        ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-       ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
-       rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;
+       ret = rdma_translate_ip(&id->route.addr.src_addr,
+                               &id->route.addr.dev_addr);
+       if (ret)
+               goto destroy_id;
 
        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
@@ -1095,7 +1088,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        int offset, ret;
 
        listen_id = cm_id->context;
-       if (cma_disable_remove(listen_id, CMA_LISTEN))
+       if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;
 
        memset(&event, 0, sizeof event);
@@ -1116,7 +1109,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                goto out;
        }
 
-       atomic_inc(&conn_id->dev_remove);
+       mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
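
The mutex_lock_nested() call above is needed because the listener's handler_mutex is still held (taken in cma_disable_callback()) when the new connection's handler_mutex, which belongs to the same lock class, is acquired; SINGLE_DEPTH_NESTING tells lockdep that this one level of same-class nesting is intentional rather than a recursive deadlock. The ordering, sketched with the names from this patch and not the exact control flow of cma_req_handler():

        mutex_lock(&listen_id->handler_mutex);          /* outer, via cma_disable_callback() */
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        /* ... hand the connection request to the consumer ... */
        mutex_unlock(&conn_id->handler_mutex);
        mutex_unlock(&listen_id->handler_mutex);
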
@@ -1138,7 +1131,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                    !cma_is_ud_ps(conn_id->id.ps))
                        ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
                mutex_unlock(&lock);
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                goto out;
        }
 
@@ -1147,11 +1140,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
        cma_exch(conn_id, CMA_DESTROYING);
-       cma_enable_remove(conn_id);
+       mutex_unlock(&conn_id->handler_mutex);
        rdma_destroy_id(&conn_id->id);
 
 out:
-       cma_enable_remove(listen_id);
+       mutex_unlock(&listen_id->handler_mutex);
        return ret;
 }
 
@@ -1217,7 +1210,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr_in *sin;
        int ret = 0;
 
-       if (cma_disable_remove(id_priv, CMA_CONNECT))
+       if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -1261,12 +1254,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
-               cma_enable_remove(id_priv);
+               mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
 
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
 
@@ -1282,7 +1275,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        struct ib_device_attr attr;
 
        listen_id = cm_id->context;
-       if (cma_disable_remove(listen_id, CMA_LISTEN))
+       if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;
 
        /* Create a new RDMA id for the new IW CM ID */
@@ -1294,19 +1287,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
-       atomic_inc(&conn_id->dev_remove);
+       mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = CMA_CONNECT;
 
        dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
                ret = -EADDRNOTAVAIL;
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
        ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
        if (ret) {
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
@@ -1315,7 +1308,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
@@ -1331,7 +1324,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 
        ret = ib_query_device(conn_id->id.device, &attr);
        if (ret) {
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
@@ -1347,14 +1340,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
-               cma_enable_remove(conn_id);
+               mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(&conn_id->id);
+               goto out;
        }
 
+       mutex_unlock(&conn_id->handler_mutex);
+
 out:
        if (dev)
                dev_put(dev);
-       cma_enable_remove(listen_id);
+       mutex_unlock(&listen_id->handler_mutex);
        return ret;
 }
 
@@ -1446,7 +1442,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
-                      "listening on device %s", ret, cma_dev->device->name);
+                      "listening on device %s\n", ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -1586,7 +1582,7 @@ static void cma_work_handler(struct work_struct *_work)
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;
 
-       atomic_inc(&id_priv->dev_remove);
+       mutex_lock(&id_priv->handler_mutex);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;
 
@@ -1595,7 +1591,7 @@ static void cma_work_handler(struct work_struct *_work)
                destroy = 1;
        }
 out:
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
@@ -1758,7 +1754,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
        struct rdma_cm_event event;
 
        memset(&event, 0, sizeof event);
-       atomic_inc(&id_priv->dev_remove);
+       mutex_lock(&id_priv->handler_mutex);
 
        /*
         * Grab mutex to block rdma_destroy_id() from removing the device while
@@ -1787,13 +1783,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
        if (id_priv->id.event_handler(&id_priv->id, &event)) {
                cma_exch(id_priv, CMA_DESTROYING);
-               cma_enable_remove(id_priv);
+               mutex_unlock(&id_priv->handler_mutex);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
                return;
        }
 out:
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
 }
 
@@ -2120,7 +2116,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
 
-       if (cma_disable_remove(id_priv, CMA_CONNECT))
+       if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -2151,7 +2147,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
                event.status = 0;
                break;
        default:
-               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }
@@ -2161,12 +2157,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
-               cma_enable_remove(id_priv);
+               mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
 out:
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
 
@@ -2564,8 +2560,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        int ret;
 
        id_priv = mc->id_priv;
-       if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
-           cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+       if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
+           cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
                return 0;
 
        mutex_lock(&id_priv->qp_mutex);
@@ -2590,12 +2586,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                cma_exch(id_priv, CMA_DESTROYING);
-               cma_enable_remove(id_priv);
+               mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return 0;
        }
 
-       cma_enable_remove(id_priv);
+       mutex_unlock(&id_priv->handler_mutex);
        return 0;
 }
 
@@ -2754,6 +2750,7 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
        struct rdma_cm_event event;
        enum cma_state state;
+       int ret = 0;
 
        /* Record that we want to remove the device */
        state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
@@ -2761,15 +2758,18 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
                return 0;
 
        cma_cancel_operation(id_priv, state);
-       wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));
+       mutex_lock(&id_priv->handler_mutex);
 
        /* Check for destruction from another callback. */
        if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
-               return 0;
+               goto out;
 
        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
-       return id_priv->id.event_handler(&id_priv->id, &event);
+       ret = id_priv->id.event_handler(&id_priv->id, &event);
+out:
+       mutex_unlock(&id_priv->handler_mutex);
+       return ret;
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)
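
As before, a non-zero return from the consumer's event handler (including for the RDMA_CM_EVENT_DEVICE_REMOVAL delivery shown here) tells the CMA core to destroy the id itself: the state moves to CMA_DESTROYING, handler_mutex is dropped and rdma_destroy_id() is called by the core. A hypothetical consumer handler relying on that contract:

static int my_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                /* Non-zero return asks the core to destroy this id, so the
                 * consumer must not also call rdma_destroy_id() here. */
                return 1;
        }
        return 0;
}
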
index 7ad47a4..05ac36e 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: core_priv.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef _CORE_PRIV_H
index 5ac5ffe..7913b80 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: device.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/module.h>
index 1286dc1..4507043 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
  */
 
 #include <linux/errno.h>
index 8b75010..05ce331 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mad_priv.h 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 
 #ifndef __IB_MAD_PRIV_H__
index a5e2a31..d0ef7d6 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
  */
 
 #include "mad_priv.h"
index f0616fd..3d336bf 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mad_rmpp.h 1921 2005-02-25 22:58:44Z sean.hefty $
  */
 
 #ifndef __MAD_RMPP_H__
index c972d72..019bd4b 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/string.h>
index cf474ec..1341de7 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
  */
 
 #include <linux/module.h>
@@ -361,7 +359,7 @@ static void update_sm_ah(struct work_struct *work)
 {
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, update_task);
-       struct ib_sa_sm_ah *new_ah, *old_ah;
+       struct ib_sa_sm_ah *new_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;
 
@@ -397,12 +395,9 @@ static void update_sm_ah(struct work_struct *work)
        }
 
        spin_lock_irq(&port->ah_lock);
-       old_ah = port->sm_ah;
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);
 
-       if (old_ah)
-               kref_put(&old_ah->ref, free_sm_ah);
 }
 
 static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
@@ -413,8 +408,17 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER) {
-               struct ib_sa_device *sa_dev;
-               sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
+               unsigned long flags;
+               struct ib_sa_device *sa_dev =
+                       container_of(handler, typeof(*sa_dev), event_handler);
+               struct ib_sa_port *port =
+                       &sa_dev->port[event->element.port_num - sa_dev->start_port];
+
+               spin_lock_irqsave(&port->ah_lock, flags);
+               if (port->sm_ah)
+                       kref_put(&port->sm_ah->ref, free_sm_ah);
+               port->sm_ah = NULL;
+               spin_unlock_irqrestore(&port->ah_lock, flags);
 
                schedule_work(&sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
@@ -519,6 +523,10 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
        unsigned long flags;
 
        spin_lock_irqsave(&query->port->ah_lock, flags);
+       if (!query->port->sm_ah) {
+               spin_unlock_irqrestore(&query->port->ah_lock, flags);
+               return -EAGAIN;
+       }
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);
index 9575655..4d10421 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: sysfs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include "core_priv.h"
@@ -665,6 +663,120 @@ static struct class ib_class = {
        .dev_uevent = ib_device_uevent,
 };
 
+/* Show a given an attribute in the statistics group */
+static ssize_t show_protocol_stat(const struct device *device,
+                           struct device_attribute *attr, char *buf,
+                           unsigned offset)
+{
+       struct ib_device *dev = container_of(device, struct ib_device, dev);
+       union rdma_protocol_stats stats;
+       ssize_t ret;
+
+       ret = dev->get_protocol_stats(dev, &stats);
+       if (ret)
+               return ret;
+
+       return sprintf(buf, "%llu\n",
+                      (unsigned long long) ((u64 *) &stats)[offset]);
+}
+
+/* generate a read-only iwarp statistics attribute */
+#define IW_STATS_ENTRY(name)                                           \
+static ssize_t show_##name(struct device *device,                      \
+                          struct device_attribute *attr, char *buf)    \
+{                                                                      \
+       return show_protocol_stat(device, attr, buf,                    \
+                                 offsetof(struct iw_protocol_stats, name) / \
+                                 sizeof (u64));                        \
+}                                                                      \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+IW_STATS_ENTRY(ipInReceives);
+IW_STATS_ENTRY(ipInHdrErrors);
+IW_STATS_ENTRY(ipInTooBigErrors);
+IW_STATS_ENTRY(ipInNoRoutes);
+IW_STATS_ENTRY(ipInAddrErrors);
+IW_STATS_ENTRY(ipInUnknownProtos);
+IW_STATS_ENTRY(ipInTruncatedPkts);
+IW_STATS_ENTRY(ipInDiscards);
+IW_STATS_ENTRY(ipInDelivers);
+IW_STATS_ENTRY(ipOutForwDatagrams);
+IW_STATS_ENTRY(ipOutRequests);
+IW_STATS_ENTRY(ipOutDiscards);
+IW_STATS_ENTRY(ipOutNoRoutes);
+IW_STATS_ENTRY(ipReasmTimeout);
+IW_STATS_ENTRY(ipReasmReqds);
+IW_STATS_ENTRY(ipReasmOKs);
+IW_STATS_ENTRY(ipReasmFails);
+IW_STATS_ENTRY(ipFragOKs);
+IW_STATS_ENTRY(ipFragFails);
+IW_STATS_ENTRY(ipFragCreates);
+IW_STATS_ENTRY(ipInMcastPkts);
+IW_STATS_ENTRY(ipOutMcastPkts);
+IW_STATS_ENTRY(ipInBcastPkts);
+IW_STATS_ENTRY(ipOutBcastPkts);
+IW_STATS_ENTRY(tcpRtoAlgorithm);
+IW_STATS_ENTRY(tcpRtoMin);
+IW_STATS_ENTRY(tcpRtoMax);
+IW_STATS_ENTRY(tcpMaxConn);
+IW_STATS_ENTRY(tcpActiveOpens);
+IW_STATS_ENTRY(tcpPassiveOpens);
+IW_STATS_ENTRY(tcpAttemptFails);
+IW_STATS_ENTRY(tcpEstabResets);
+IW_STATS_ENTRY(tcpCurrEstab);
+IW_STATS_ENTRY(tcpInSegs);
+IW_STATS_ENTRY(tcpOutSegs);
+IW_STATS_ENTRY(tcpRetransSegs);
+IW_STATS_ENTRY(tcpInErrs);
+IW_STATS_ENTRY(tcpOutRsts);
+
+static struct attribute *iw_proto_stats_attrs[] = {
+       &dev_attr_ipInReceives.attr,
+       &dev_attr_ipInHdrErrors.attr,
+       &dev_attr_ipInTooBigErrors.attr,
+       &dev_attr_ipInNoRoutes.attr,
+       &dev_attr_ipInAddrErrors.attr,
+       &dev_attr_ipInUnknownProtos.attr,
+       &dev_attr_ipInTruncatedPkts.attr,
+       &dev_attr_ipInDiscards.attr,
+       &dev_attr_ipInDelivers.attr,
+       &dev_attr_ipOutForwDatagrams.attr,
+       &dev_attr_ipOutRequests.attr,
+       &dev_attr_ipOutDiscards.attr,
+       &dev_attr_ipOutNoRoutes.attr,
+       &dev_attr_ipReasmTimeout.attr,
+       &dev_attr_ipReasmReqds.attr,
+       &dev_attr_ipReasmOKs.attr,
+       &dev_attr_ipReasmFails.attr,
+       &dev_attr_ipFragOKs.attr,
+       &dev_attr_ipFragFails.attr,
+       &dev_attr_ipFragCreates.attr,
+       &dev_attr_ipInMcastPkts.attr,
+       &dev_attr_ipOutMcastPkts.attr,
+       &dev_attr_ipInBcastPkts.attr,
+       &dev_attr_ipOutBcastPkts.attr,
+       &dev_attr_tcpRtoAlgorithm.attr,
+       &dev_attr_tcpRtoMin.attr,
+       &dev_attr_tcpRtoMax.attr,
+       &dev_attr_tcpMaxConn.attr,
+       &dev_attr_tcpActiveOpens.attr,
+       &dev_attr_tcpPassiveOpens.attr,
+       &dev_attr_tcpAttemptFails.attr,
+       &dev_attr_tcpEstabResets.attr,
+       &dev_attr_tcpCurrEstab.attr,
+       &dev_attr_tcpInSegs.attr,
+       &dev_attr_tcpOutSegs.attr,
+       &dev_attr_tcpRetransSegs.attr,
+       &dev_attr_tcpInErrs.attr,
+       &dev_attr_tcpOutRsts.attr,
+       NULL
+};
+
+static struct attribute_group iw_stats_group = {
+       .name   = "proto_stats",
+       .attrs  = iw_proto_stats_attrs,
+};
+
 int ib_device_register_sysfs(struct ib_device *device)
 {
        struct device *class_dev = &device->dev;
@@ -707,6 +819,12 @@ int ib_device_register_sysfs(struct ib_device *device)
                }
        }
 
+       if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
+               ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
+               if (ret)
+                       goto err_put;
+       }
+
        return 0;
 
 err_put:
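
For reference, each IW_STATS_ENTRY(name) above expands to one read-only device attribute whose show routine indexes the protocol-statistics union by the field's offset; IW_STATS_ENTRY(tcpInSegs), for instance, becomes roughly:

static ssize_t show_tcpInSegs(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        return show_protocol_stat(device, attr, buf,
                                  offsetof(struct iw_protocol_stats, tcpInSegs) /
                                  sizeof (u64));
}
static DEVICE_ATTR(tcpInSegs, S_IRUGO, show_tcpInSegs, NULL);

On an iWARP device (RDMA_NODE_RNIC) that implements get_protocol_stats, the counter should then appear as a file in the new proto_stats group, e.g. /sys/class/infiniband/<device>/proto_stats/tcpInSegs (path given as an assumption about the class device layout).
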
index b25675f..9494005 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ucm.c 4311 2005-12-05 18:42:01Z sean.hefty $
  */
 
 #include <linux/completion.h>
index 997c07d..8ec7876 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ud_header.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/errno.h>
index a1768db..6f7c096 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: uverbs_mem.c 2743 2005-06-28 22:27:59Z roland $
  */
 
 #include <linux/mm.h>
index 208c7f3..268a2d2 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: user_mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 
 #include <linux/module.h>
index 376a57c..b3ea958 100644 (file)
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: uverbs.h 2559 2005-06-06 19:43:16Z roland $
  */
 
 #ifndef UVERBS_H
index 2c3bff5..56feab6 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
  */
 
 #include <linux/file.h>
@@ -919,7 +917,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                resp->wc[i].opcode         = wc[i].opcode;
                resp->wc[i].vendor_err     = wc[i].vendor_err;
                resp->wc[i].byte_len       = wc[i].byte_len;
-               resp->wc[i].imm_data       = (__u32 __force) wc[i].imm_data;
+               resp->wc[i].ex.imm_data    = (__u32 __force) wc[i].ex.imm_data;
                resp->wc[i].qp_num         = wc[i].qp->qp_num;
                resp->wc[i].src_qp         = wc[i].src_qp;
                resp->wc[i].wc_flags       = wc[i].wc_flags;
index 0f34858..aeee856 100644 (file)
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: uverbs_main.c 2733 2005-06-28 19:14:34Z roland $
  */
 
 #include <linux/module.h>
index 0504208..a7da9be 100644 (file)
@@ -34,8 +34,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/errno.h>
@@ -317,7 +315,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
-               [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
@@ -755,6 +752,52 @@ int ib_dereg_mr(struct ib_mr *mr)
 }
 EXPORT_SYMBOL(ib_dereg_mr);
 
+struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+{
+       struct ib_mr *mr;
+
+       if (!pd->device->alloc_fast_reg_mr)
+               return ERR_PTR(-ENOSYS);
+
+       mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);
+
+       if (!IS_ERR(mr)) {
+               mr->device  = pd->device;
+               mr->pd      = pd;
+               mr->uobject = NULL;
+               atomic_inc(&pd->usecnt);
+               atomic_set(&mr->usecnt, 0);
+       }
+
+       return mr;
+}
+EXPORT_SYMBOL(ib_alloc_fast_reg_mr);
+
+struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
+                                                         int max_page_list_len)
+{
+       struct ib_fast_reg_page_list *page_list;
+
+       if (!device->alloc_fast_reg_page_list)
+               return ERR_PTR(-ENOSYS);
+
+       page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
+
+       if (!IS_ERR(page_list)) {
+               page_list->device = device;
+               page_list->max_page_list_len = max_page_list_len;
+       }
+
+       return page_list;
+}
+EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
+
+void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
+{
+       page_list->device->free_fast_reg_page_list(page_list);
+}
+EXPORT_SYMBOL(ib_free_fast_reg_page_list);
+
 /* Memory windows */
 
 struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
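
The three new entry points are meant to be called once at setup time; the actual memory registration is then posted to the send queue as a fast-register work request. A minimal, hypothetical consumer sketch using only the functions introduced above (32 is an arbitrary page-list depth):

static int example_fastreg_setup(struct ib_pd *pd)
{
        struct ib_fast_reg_page_list *pl;
        struct ib_mr *mr;

        mr = ib_alloc_fast_reg_mr(pd, 32);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        pl = ib_alloc_fast_reg_page_list(pd->device, 32);
        if (IS_ERR(pl)) {
                ib_dereg_mr(mr);
                return PTR_ERR(pl);
        }

        /* ... fill pl->page_list[] with DMA page addresses and post a
         * fast-register work request referencing mr and pl ... */

        ib_free_fast_reg_page_list(pl);
        ib_dereg_mr(mr);
        return 0;
}
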
index b1441ae..dd05c48 100644 (file)
@@ -454,7 +454,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
            (IB_DEVICE_RESIZE_MAX_WR |
             IB_DEVICE_CURR_QP_STATE_MOD |
             IB_DEVICE_SYS_IMAGE_GUID |
-            IB_DEVICE_ZERO_STAG |
+            IB_DEVICE_LOCAL_DMA_LKEY |
             IB_DEVICE_MEM_WINDOW);
 
        /* Allocate the qptr_array */
index 3f441fc..f6d5747 100644 (file)
@@ -145,7 +145,9 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
        }
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
-       build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
+       build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
+                      T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
+                      T3_SOPEOP);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = qpid << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
@@ -276,7 +278,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
        if (!wq->qpid)
                return -ENOMEM;
 
-       wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
+       wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
        if (!wq->rq)
                goto err1;
 
@@ -300,6 +302,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
        if (!kernel_domain)
                wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
                                        (wq->qpid << rdev_p->qpshift);
+       wq->rdev = rdev_p;
        PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
             wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
        return 0;
@@ -558,7 +561,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
        build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
-                      T3_CTL_QP_TID, 7);
+                      T3_CTL_QP_TID, 7, T3_SOPEOP);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
@@ -674,7 +677,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
                build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
                               Q_GENBIT(rdev_p->ctrl_qp.wptr,
                                        T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
-                              wr_len);
+                              wr_len, T3_SOPEOP);
                if (flag == T3_COMPLETION_FLAG)
                        ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
                len -= 96;
@@ -816,6 +819,13 @@ int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
                             0, 0);
 }
 
+int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
+{
+       *stag = T3_STAG_UNSET;
+       return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
+                            0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
+}
+
 int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 {
        struct t3_rdma_init_wr *wqe;
@@ -1257,13 +1267,16 @@ proc_cqe:
                wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
                PDBG("%s completing sq idx %ld\n", __func__,
                     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
-               *cookie = (wq->sq +
-                          Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
+               *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
                wq->sq_rptr++;
        } else {
                PDBG("%s completing rq idx %ld\n", __func__,
                     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
-               *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
+               *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
+               if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
+                       cxio_hal_pblpool_free(wq->rdev,
+                               wq->rq[Q_PTR2IDX(wq->rq_rptr,
+                               wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
                wq->rq_rptr++;
        }
 
index 6e128f6..656fe47 100644 (file)
 #define T3_CTRL_QP_SIZE_LOG2  8
 #define T3_CTRL_CQ_ID    0
 
-/* TBD */
 #define T3_MAX_NUM_RI (1<<15)
 #define T3_MAX_NUM_QP (1<<15)
 #define T3_MAX_NUM_CQ (1<<15)
 #define T3_MAX_NUM_PD (1<<15)
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
+#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
+#define T3_MAX_CQ_DEPTH 8192
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
+#define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 
 #define T3_STAG_UNSET 0xffffffff
 
@@ -165,6 +167,7 @@ int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
 int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
                   u32 pbl_addr);
 int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
+int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
 int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
 int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
index f1a25a8..04618f7 100644 (file)
@@ -39,6 +39,9 @@
 
 #define T3_MAX_SGE      4
 #define T3_MAX_INLINE  64
+#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
+#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
+#define T3_STAG0_PAGE_SHIFT 15
 
 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
 #define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
@@ -72,7 +75,8 @@ enum t3_wr_opcode {
        T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
        T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
        T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
-       T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
+       T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
+       T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
 } __attribute__ ((packed));
 
 enum t3_rdma_opcode {
@@ -89,7 +93,8 @@ enum t3_rdma_opcode {
        T3_FAST_REGISTER,
        T3_LOCAL_INV,
        T3_QP_MOD,
-       T3_BYPASS
+       T3_BYPASS,
+       T3_RDMA_READ_REQ_WITH_INV,
 } __attribute__ ((packed));
 
 static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
@@ -103,6 +108,7 @@ static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
                case T3_WR_BIND: return T3_BIND_MW;
                case T3_WR_INIT: return T3_RDMA_INIT;
                case T3_WR_QP_MOD: return T3_QP_MOD;
+               case T3_WR_FASTREG: return T3_FAST_REGISTER;
                default: break;
        }
        return -1;
@@ -170,11 +176,54 @@ struct t3_send_wr {
        struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
 };
 
+#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_FRAG 10
+
+struct t3_fastreg_wr {
+       struct fw_riwrh wrh;    /* 0 */
+       union t3_wrid wrid;     /* 1 */
+       __be32 stag;            /* 2 */
+       __be32 len;
+       __be32 va_base_hi;      /* 3 */
+       __be32 va_base_lo_fbo;
+       __be32 page_type_perms; /* 4 */
+       __be32 reserved1;
+       __be64 pbl_addrs[0];    /* 5+ */
+};
+
+/*
+ * If a fastreg wr spans multiple wqes, then the 2nd fragment look like this.
+ */
+struct t3_pbl_frag {
+       struct fw_riwrh wrh;    /* 0 */
+       __be64 pbl_addrs[14];   /* 1..14 */
+};
+
+#define S_FR_PAGE_COUNT                24
+#define M_FR_PAGE_COUNT                0xff
+#define V_FR_PAGE_COUNT(x)     ((x) << S_FR_PAGE_COUNT)
+#define G_FR_PAGE_COUNT(x)     ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
+
+#define S_FR_PAGE_SIZE         16
+#define M_FR_PAGE_SIZE         0x1f
+#define V_FR_PAGE_SIZE(x)      ((x) << S_FR_PAGE_SIZE)
+#define G_FR_PAGE_SIZE(x)      ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
+
+#define S_FR_TYPE              8
+#define M_FR_TYPE              0x1
+#define V_FR_TYPE(x)           ((x) << S_FR_TYPE)
+#define G_FR_TYPE(x)           ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
+
+#define S_FR_PERMS             0
+#define M_FR_PERMS             0xff
+#define V_FR_PERMS(x)          ((x) << S_FR_PERMS)
+#define G_FR_PERMS(x)          ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
+
 struct t3_local_inv_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        __be32 stag;            /* 2 */
-       __be32 reserved3;
+       __be32 reserved;
 };
 
 struct t3_rdma_write_wr {
@@ -193,7 +242,8 @@ struct t3_rdma_read_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 rdmaop;              /* 2 */
-       u8 reserved[3];
+       u8 local_inv;
+       u8 reserved[2];
        __be32 rem_stag;
        __be64 rem_to;          /* 3 */
        __be32 local_stag;      /* 4 */
@@ -201,18 +251,6 @@ struct t3_rdma_read_wr {
        __be64 local_to;        /* 5 */
 };
 
-enum t3_addr_type {
-       T3_VA_BASED_TO = 0x0,
-       T3_ZERO_BASED_TO = 0x1
-} __attribute__ ((packed));
-
-enum t3_mem_perms {
-       T3_MEM_ACCESS_LOCAL_READ = 0x1,
-       T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
-       T3_MEM_ACCESS_REM_READ = 0x4,
-       T3_MEM_ACCESS_REM_WRITE = 0x8
-} __attribute__ ((packed));
-
 struct t3_bind_mw_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
@@ -336,6 +374,11 @@ struct t3_genbit {
        __be64 genbit;
 };
 
+struct t3_wq_in_err {
+       u64 flit[13];
+       u64 err;
+};
+
 enum rdma_init_wr_flags {
        MPA_INITIATOR = (1<<0),
        PRIV_QP = (1<<1),
@@ -346,13 +389,16 @@ union t3_wr {
        struct t3_rdma_write_wr write;
        struct t3_rdma_read_wr read;
        struct t3_receive_wr recv;
+       struct t3_fastreg_wr fastreg;
+       struct t3_pbl_frag pbl_frag;
        struct t3_local_inv_wr local_inv;
        struct t3_bind_mw_wr bind;
        struct t3_bypass_wr bypass;
        struct t3_rdma_init_wr init;
        struct t3_modify_qp_wr qp_mod;
        struct t3_genbit genbit;
-       u64 flit[16];
+       struct t3_wq_in_err wq_in_err;
+       __be64 flit[16];
 };
 
 #define T3_SQ_CQE_FLIT   13
@@ -366,12 +412,18 @@ static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
        return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
 }
 
+enum t3_wr_hdr_bits {
+       T3_EOP = 1,
+       T3_SOP = 2,
+       T3_SOPEOP = T3_EOP|T3_SOP,
+};
+
 static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
                                  enum t3_wr_flags flags, u8 genbit, u32 tid,
-                                 u8 len)
+                                 u8 len, u8 sopeop)
 {
        wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
-                                        V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
+                                        V_FW_RIWR_SOPEOP(sopeop) |
                                         V_FW_RIWR_FLAGS(flags));
        wmb();
        wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
@@ -404,6 +456,7 @@ enum tpt_addr_type {
 };
 
 enum tpt_mem_perm {
+       TPT_MW_BIND = 0x10,
        TPT_LOCAL_READ = 0x8,
        TPT_LOCAL_WRITE = 0x4,
        TPT_REMOTE_READ = 0x2,
@@ -615,6 +668,11 @@ struct t3_swsq {
        int                     signaled;
 };
 
+struct t3_swrq {
+       __u64                   wr_id;
+       __u32                   pbl_addr;
+};
+
 /*
  * A T3 WQ implements both the SQ and RQ.
  */
@@ -631,14 +689,15 @@ struct t3_wq {
        u32 sq_wptr;                    /* sq_wptr - sq_rptr == count of */
        u32 sq_rptr;                    /* pending wrs */
        u32 sq_size_log2;               /* sq size */
-       u64 *rq;                        /* SW RQ (holds consumer wr_ids */
+       struct t3_swrq *rq;             /* SW RQ (holds consumer wr_ids */
        u32 rq_wptr;                    /* rq_wptr - rq_rptr == count of */
        u32 rq_rptr;                    /* pending wrs */
-       u64 *rq_oldest_wr;              /* oldest wr on the SW RQ */
+       struct t3_swrq *rq_oldest_wr;   /* oldest wr on the SW RQ */
        u32 rq_size_log2;               /* rq size */
        u32 rq_addr;                    /* rq adapter address */
        void __iomem *doorbell;         /* kernel db */
        u64 udb;                        /* user db if any */
+       struct cxio_rdev *rdev;
 };
 
 struct t3_cq {
@@ -659,7 +718,7 @@ struct t3_cq {
 
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
-       wq->queue->flit[13] = 1;
+       wq->queue->wq_in_err.err = 1;
 }
 
 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
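
The S_/M_/V_/G_ helpers added for the fastreg WR describe the layout of its page_type_perms word: page count in bits 31:24, page-size code in bits 20:16, address type in bit 8 and access permissions in bits 7:0. Purely as an illustration of how those helpers compose (npages, page_size_code and perms are placeholder values, not taken from the driver):

        wqe->page_type_perms = cpu_to_be32(V_FR_PAGE_COUNT(npages) |
                                           V_FR_PAGE_SIZE(page_size_code) |
                                           V_FR_TYPE(0) |      /* VA-based */
                                           V_FR_PERMS(perms));
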
index 71554ea..4489c89 100644 (file)
@@ -71,18 +71,16 @@ static void rnic_init(struct iwch_dev *rnicp)
        idr_init(&rnicp->mmidr);
        spin_lock_init(&rnicp->lock);
 
-       rnicp->attr.vendor_id = 0x168;
-       rnicp->attr.vendor_part_id = 7;
        rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
-       rnicp->attr.max_wrs = (1UL << 24) - 1;
+       rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
        rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
        rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
        rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
-       rnicp->attr.max_cqes_per_cq = (1UL << 24) - 1;
+       rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
        rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
        rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
        rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
-       rnicp->attr.mem_pgsizes_bitmask = 0x7FFF;       /* 4KB-128MB */
+       rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
        rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
        rnicp->attr.can_resize_wq = 0;
        rnicp->attr.max_rdma_reads_per_qp = 8;
index d2409a5..3773453 100644 (file)
@@ -48,8 +48,6 @@ struct iwch_qp;
 struct iwch_mr;
 
 struct iwch_rnic_attributes {
-       u32 vendor_id;
-       u32 vendor_part_id;
        u32 max_qps;
        u32 max_wrs;                            /* Max for any SQ/RQ */
        u32 max_sge_per_wr;
index 4ee8ccd..cf5474a 100644 (file)
@@ -81,6 +81,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(cqe);
+       wc->wc_flags = 0;
 
        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__,
@@ -94,6 +95,11 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
+               if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
+                   CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
+                       wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
+                       wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+               }
        } else {
                switch (CQE_OPCODE(cqe)) {
                case T3_RDMA_WRITE:
@@ -105,17 +111,20 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                        break;
                case T3_SEND:
                case T3_SEND_WITH_SE:
+               case T3_SEND_WITH_INV:
+               case T3_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        break;
                case T3_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
 
-               /* these aren't supported yet */
-               case T3_SEND_WITH_INV:
-               case T3_SEND_WITH_SE_INV:
                case T3_LOCAL_INV:
+                       wc->opcode = IB_WC_LOCAL_INV;
+                       break;
                case T3_FAST_REGISTER:
+                       wc->opcode = IB_WC_FAST_REG_MR;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
index 95f82cf..b89640a 100644
@@ -56,6 +56,7 @@
 #include "iwch_provider.h"
 #include "iwch_cm.h"
 #include "iwch_user.h"
+#include "common.h"
 
 static int iwch_modify_port(struct ib_device *ibdev,
                            u8 port, int port_modify_mask,
@@ -747,6 +748,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
        mhp->attr.type = TPT_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
+       mhp->ibmw.rkey = stag;
        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
@@ -768,6 +770,68 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
        return 0;
 }
 
+static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+       struct iwch_dev *rhp;
+       struct iwch_pd *php;
+       struct iwch_mr *mhp;
+       u32 mmid;
+       u32 stag = 0;
+       int ret;
+
+       php = to_iwch_pd(pd);
+       rhp = php->rhp;
+       mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+       if (!mhp)
+               return ERR_PTR(-ENOMEM);
+
+       mhp->rhp = rhp;
+       ret = iwch_alloc_pbl(mhp, pbl_depth);
+       if (ret) {
+               kfree(mhp);
+               return ERR_PTR(ret);
+       }
+       mhp->attr.pbl_size = pbl_depth;
+       ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
+                                mhp->attr.pbl_size, mhp->attr.pbl_addr);
+       if (ret) {
+               iwch_free_pbl(mhp);
+               kfree(mhp);
+               return ERR_PTR(ret);
+       }
+       mhp->attr.pdid = php->pdid;
+       mhp->attr.type = TPT_NON_SHARED_MR;
+       mhp->attr.stag = stag;
+       mhp->attr.state = 1;
+       mmid = (stag) >> 8;
+       mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+       return &(mhp->ibmr);
+}
+
+static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
+                                       struct ib_device *device,
+                                       int page_list_len)
+{
+       struct ib_fast_reg_page_list *page_list;
+
+       page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
+                           GFP_KERNEL);
+       if (!page_list)
+               return ERR_PTR(-ENOMEM);
+
+       page_list->page_list = (u64 *)(page_list + 1);
+       page_list->max_page_list_len = page_list_len;
+
+       return page_list;
+}
+
+static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
+{
+       kfree(page_list);
+}
+
 static int iwch_destroy_qp(struct ib_qp *ib_qp)
 {
        struct iwch_dev *rhp;
@@ -843,6 +907,15 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
         */
        sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
        wqsize = roundup_pow_of_two(rqsize + sqsize);
+
+       /*
+        * Kernel users need more wq space for fastreg WRs, which can take
+        * 2 WR fragments.
+        */
+       ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
+       if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
+               wqsize = roundup_pow_of_two(rqsize +
+                               roundup_pow_of_two(attrs->cap.max_send_wr * 2));
        PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
             wqsize, sqsize, rqsize);
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
@@ -851,7 +924,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
        qhp->wq.size_log2 = ilog2(wqsize);
        qhp->wq.rq_size_log2 = ilog2(rqsize);
        qhp->wq.sq_size_log2 = ilog2(sqsize);
-       ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
        if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
                           ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
                kfree(qhp);
@@ -935,10 +1007,10 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
        qhp->ibqp.qp_num = qhp->wq.qpid;
        init_timer(&(qhp->timer));
        PDBG("%s sq_num_entries %d, rq_num_entries %d "
-            "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
+            "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
             __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
-            1 << qhp->wq.size_log2);
+            1 << qhp->wq.size_log2, qhp->wq.rq_addr);
        return &qhp->ibqp;
 }
 
@@ -1023,6 +1095,29 @@ static int iwch_query_gid(struct ib_device *ibdev, u8 port,
        return 0;
 }
 
+static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
+{
+       struct ethtool_drvinfo info;
+       struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+       char *cp, *next;
+       unsigned fw_maj, fw_min, fw_mic;
+
+       rtnl_lock();
+       lldev->ethtool_ops->get_drvinfo(lldev, &info);
+       rtnl_unlock();
+
+       next = info.fw_version + 1;
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_maj);
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_min);
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_mic);
+
+       return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
+              (fw_mic & 0xffff);
+}
+
 static int iwch_query_device(struct ib_device *ibdev,
                             struct ib_device_attr *props)
 {
@@ -1033,7 +1128,10 @@ static int iwch_query_device(struct ib_device *ibdev,
        dev = to_iwch_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
+       props->hw_ver = dev->rdev.t3cdev_p->type;
+       props->fw_ver = fw_vers_string_to_u64(dev);
        props->device_cap_flags = dev->device_cap_flags;
+       props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
        props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
        props->max_mr_size = dev->attr.max_mr_size;
@@ -1048,6 +1146,7 @@ static int iwch_query_device(struct ib_device *ibdev,
        props->max_mr = dev->attr.max_mem_regs;
        props->max_pd = dev->attr.max_pds;
        props->local_ca_ack_delay = 0;
+       props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
 
        return 0;
 }
@@ -1088,6 +1187,28 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }
 
+static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
+{
+       struct ethtool_drvinfo info;
+       struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+       char *cp, *next;
+       unsigned fw_maj, fw_min;
+
+       rtnl_lock();
+       lldev->ethtool_ops->get_drvinfo(lldev, &info);
+       rtnl_unlock();
+
+       next = info.fw_version+1;
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_maj);
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_min);
+
+       PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
+
+       return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
+}
+
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1127,6 +1248,61 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                       iwch_dev->rdev.rnic_info.pdev->device);
 }
 
+static int iwch_get_mib(struct ib_device *ibdev,
+                       union rdma_protocol_stats *stats)
+{
+       struct iwch_dev *dev;
+       struct tp_mib_stats m;
+       int ret;
+
+       PDBG("%s ibdev %p\n", __func__, ibdev);
+       dev = to_iwch_dev(ibdev);
+       ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
+       if (ret)
+               return -ENOSYS;
+
+       memset(stats, 0, sizeof *stats);
+       stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
+                               m.ipInReceive_lo;
+       stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
+                                 m.ipInHdrErrors_lo;
+       stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
+                                  m.ipInAddrErrors_lo;
+       stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
+                                     m.ipInUnknownProtos_lo;
+       stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
+                                m.ipInDiscards_lo;
+       stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
+                                m.ipInDelivers_lo;
+       stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
+                                 m.ipOutRequests_lo;
+       stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
+                                 m.ipOutDiscards_lo;
+       stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
+                                 m.ipOutNoRoutes_lo;
+       stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
+       stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
+       stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
+       stats->iw.ipReasmFails = (u64) m.ipReasmFails;
+       stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
+       stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
+       stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
+       stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
+       stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
+       stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
+       stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
+                             m.tcpInSegs_lo;
+       stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
+                              m.tcpOutSegs_lo;
+       stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
+                                 m.tcpRetransSeg_lo;
+       stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
+                             m.tcpInErrs_lo;
+       stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
+       stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
+       return 0;
+}
+
 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
 static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
@@ -1136,7 +1312,7 @@ static struct device_attribute *iwch_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
-       &dev_attr_board_id
+       &dev_attr_board_id,
 };
 
 int iwch_register_device(struct iwch_dev *dev)
@@ -1149,8 +1325,12 @@ int iwch_register_device(struct iwch_dev *dev)
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
-       dev->device_cap_flags =
-           (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
+       dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+
+       /* cxgb3 supports STag 0. */
+       dev->ibdev.local_dma_lkey = 0;
+       if (fw_supports_fastreg(dev))
+               dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1202,15 +1382,16 @@ int iwch_register_device(struct iwch_dev *dev)
        dev->ibdev.alloc_mw = iwch_alloc_mw;
        dev->ibdev.bind_mw = iwch_bind_mw;
        dev->ibdev.dealloc_mw = iwch_dealloc_mw;
-
+       dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
+       dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
+       dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
        dev->ibdev.attach_mcast = iwch_multicast_attach;
        dev->ibdev.detach_mcast = iwch_multicast_detach;
        dev->ibdev.process_mad = iwch_process_mad;
-
        dev->ibdev.req_notify_cq = iwch_arm_cq;
        dev->ibdev.post_send = iwch_post_send;
        dev->ibdev.post_recv = iwch_post_receive;
-
+       dev->ibdev.get_protocol_stats = iwch_get_mib;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index 836163f..f5ceca0 100644
@@ -296,14 +296,6 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
               TPT_LOCAL_READ;
 }
 
-static inline u32 iwch_ib_to_mwbind_access(int acc)
-{
-       return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
-              (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
-              (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
-              T3_MEM_ACCESS_LOCAL_READ;
-}
-
 enum iwch_mmid_state {
        IWCH_STAG_STATE_VALID,
        IWCH_STAG_STATE_INVALID
index 9926137..9a3be3a 100644
 #include "iwch.h"
 #include "iwch_cm.h"
 #include "cxio_hal.h"
+#include "cxio_resource.h"
 
 #define NO_SUPPORT -1
 
-static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
+static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
                                u8 * flit_cnt)
 {
        int i;
@@ -44,59 +45,44 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
 
        switch (wr->opcode) {
        case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE;
                else
                        wqe->send.rdmaop = T3_SEND;
                wqe->send.rem_stag = 0;
                break;
-#if 0                          /* Not currently supported */
-       case TYPE_SEND_INVALIDATE:
-       case TYPE_SEND_INVALIDATE_IMMEDIATE:
-               wqe->send.rdmaop = T3_SEND_WITH_INV;
-               wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
-               break;
-       case TYPE_SEND_SE_INVALIDATE:
-               wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
-               wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
+       case IB_WR_SEND_WITH_INV:
+               if (wr->send_flags & IB_SEND_SOLICITED)
+                       wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
+               else
+                       wqe->send.rdmaop = T3_SEND_WITH_INV;
+               wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
                break;
-#endif
        default:
-               break;
+               return -EINVAL;
        }
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->send.reserved[0] = 0;
        wqe->send.reserved[1] = 0;
        wqe->send.reserved[2] = 0;
-       if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-               plen = 4;
-               wqe->send.sgl[0].stag = wr->ex.imm_data;
-               wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
-               wqe->send.num_sgle = __constant_cpu_to_be32(0);
-               *flit_cnt = 5;
-       } else {
-               plen = 0;
-               for (i = 0; i < wr->num_sge; i++) {
-                       if ((plen + wr->sg_list[i].length) < plen) {
-                               return -EMSGSIZE;
-                       }
-                       plen += wr->sg_list[i].length;
-                       wqe->send.sgl[i].stag =
-                           cpu_to_be32(wr->sg_list[i].lkey);
-                       wqe->send.sgl[i].len =
-                           cpu_to_be32(wr->sg_list[i].length);
-                       wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
-               }
-               wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
-               *flit_cnt = 4 + ((wr->num_sge) << 1);
+       plen = 0;
+       for (i = 0; i < wr->num_sge; i++) {
+               if ((plen + wr->sg_list[i].length) < plen)
+                       return -EMSGSIZE;
+
+               plen += wr->sg_list[i].length;
+               wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
+               wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+               wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
        }
+       wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
+       *flit_cnt = 4 + ((wr->num_sge) << 1);
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
 }
 
-static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
+static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
                                 u8 *flit_cnt)
 {
        int i;
@@ -137,15 +123,18 @@ static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
-static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
+static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
                                u8 *flit_cnt)
 {
        if (wr->num_sge > 1)
                return -EINVAL;
        wqe->read.rdmaop = T3_READ_REQ;
+       if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+               wqe->read.local_inv = 1;
+       else
+               wqe->read.local_inv = 0;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
-       wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
        wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
@@ -155,6 +144,57 @@ static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
+static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
+                               u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
+{
+       int i;
+       __be64 *p;
+
+       if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
+               return -EINVAL;
+       *wr_cnt = 1;
+       wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+       wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
+       wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+       wqe->fastreg.va_base_lo_fbo =
+                               cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
+       wqe->fastreg.page_type_perms = cpu_to_be32(
+               V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
+               V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
+               V_FR_TYPE(TPT_VATO) |
+               V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
+       p = &wqe->fastreg.pbl_addrs[0];
+       for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
+
+               /* If we need a 2nd WR, then set it up */
+               if (i == T3_MAX_FASTREG_FRAG) {
+                       *wr_cnt = 2;
+                       wqe = (union t3_wr *)(wq->queue +
+                               Q_PTR2IDX((wq->wptr+1), wq->size_log2));
+                       build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
+                              Q_GENBIT(wq->wptr + 1, wq->size_log2),
+                              0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
+                              T3_EOP);
+
+                       p = &wqe->pbl_frag.pbl_addrs[0];
+               }
+               *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+       }
+       *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
+       if (*flit_cnt > 15)
+               *flit_cnt = 15;
+       return 0;
+}
+
+static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
+                               u8 *flit_cnt)
+{
+       wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
+       wqe->local_inv.reserved = 0;
+       *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
+       return 0;
+}
+
 /*
  * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
  */
@@ -205,23 +245,106 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
        return 0;
 }
 
-static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
+static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
                                struct ib_recv_wr *wr)
 {
-       int i;
-       if (wr->num_sge > T3_MAX_SGE)
-               return -EINVAL;
+       int i, err = 0;
+       u32 pbl_addr[T3_MAX_SGE];
+       u8 page_size[T3_MAX_SGE];
+
+       err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
+                              page_size);
+       if (err)
+               return err;
+       wqe->recv.pagesz[0] = page_size[0];
+       wqe->recv.pagesz[1] = page_size[1];
+       wqe->recv.pagesz[2] = page_size[2];
+       wqe->recv.pagesz[3] = page_size[3];
        wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
        for (i = 0; i < wr->num_sge; i++) {
                wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+
+               /* 'to' in the WQE == the offset into the page */
+               wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
+                               (1UL << (12 + page_size[i])));
+
+               /* pbl_addr is the adapter's address in the PBL */
+               wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
+       }
+       for (; i < T3_MAX_SGE; i++) {
+               wqe->recv.sgl[i].stag = 0;
+               wqe->recv.sgl[i].len = 0;
+               wqe->recv.sgl[i].to = 0;
+               wqe->recv.pbl_addr[i] = 0;
+       }
+       qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
+                            qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
+       qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
+                            qhp->wq.rq_size_log2)].pbl_addr = 0;
+       return 0;
+}
+
+static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
+                               struct ib_recv_wr *wr)
+{
+       int i;
+       u32 pbl_addr;
+       u32 pbl_offset;
+
+
+       /*
+        * The T3 HW requires the PBL in the HW recv descriptor to reference
+        * a PBL entry.  So we allocate the max needed PBL memory here and pass
+        * it to the uP in the recv WR.  The uP will build the PBL and set up
+        * the HW recv descriptor.
+        */
+       pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
+       if (!pbl_addr)
+               return -ENOMEM;
+
+       /*
+        * Compute the 8B aligned offset.
+        */
+       pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
+
+       wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
+
+       for (i = 0; i < wr->num_sge; i++) {
+
+               /*
+                * Use a 128MB page size. This and an imposed 128MB
+                * sge length limit allow us to require only a 2-entry HW
+                * PBL for each SGE.  This restriction is acceptable since
+                * it is not possible to allocate 128MB of contiguous
+                * DMA coherent memory!
+                */
+               if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
+                       return -EINVAL;
+               wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
+
+               /*
+                * T3 restricts a recv to all zero-stag or all non-zero-stag.
+                */
+               if (wr->sg_list[i].lkey != 0)
+                       return -EINVAL;
+               wqe->recv.sgl[i].stag = 0;
+               wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
                wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
+               wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
+               pbl_offset += 2;
        }
        for (; i < T3_MAX_SGE; i++) {
+               wqe->recv.pagesz[i] = 0;
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = 0;
                wqe->recv.sgl[i].to = 0;
+               wqe->recv.pbl_addr[i] = 0;
        }
+       qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
+                            qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
+       qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
+                            qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
        return 0;
 }
 
@@ -238,6 +361,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        u32 num_wrs;
        unsigned long flag;
        struct t3_swsq *sqp;
+       int wr_cnt = 1;
 
        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
@@ -262,33 +386,45 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                t3_wr_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
-               if (wr->send_flags & IB_SEND_FENCE)
-                       t3_wr_flags |= T3_READ_FENCE_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        t3_wr_flags |= T3_COMPLETION_FLAG;
                sqp = qhp->wq.sq +
                      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
                switch (wr->opcode) {
                case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
+               case IB_WR_SEND_WITH_INV:
+                       if (wr->send_flags & IB_SEND_FENCE)
+                               t3_wr_flags |= T3_READ_FENCE_FLAG;
                        t3_wr_opcode = T3_WR_SEND;
-                       err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
+                       err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        t3_wr_opcode = T3_WR_WRITE;
-                       err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
+                       err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_READ:
+               case IB_WR_RDMA_READ_WITH_INV:
                        t3_wr_opcode = T3_WR_READ;
                        t3_wr_flags = 0; /* T3 reads are always signaled */
-                       err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
+                       err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
                        if (err)
                                break;
                        sqp->read_len = wqe->read.local_len;
                        if (!qhp->wq.oldest_read)
                                qhp->wq.oldest_read = sqp;
                        break;
+               case IB_WR_FAST_REG_MR:
+                       t3_wr_opcode = T3_WR_FASTREG;
+                       err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
+                                                &wr_cnt, &qhp->wq);
+                       break;
+               case IB_WR_LOCAL_INV:
+                       if (wr->send_flags & IB_SEND_FENCE)
+                               t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
+                       t3_wr_opcode = T3_WR_INV_STAG;
+                       err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
+                       break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
                             wr->opcode);
@@ -307,14 +443,15 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
-                              0, t3_wr_flit_cnt);
+                              0, t3_wr_flit_cnt,
+                              (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
                PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
                     __func__, (unsigned long long) wr->wr_id, idx,
                     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
                     sqp->opcode);
                wr = wr->next;
                num_wrs--;
-               ++(qhp->wq.wptr);
+               qhp->wq.wptr += wr_cnt;
                ++(qhp->wq.sq_wptr);
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
@@ -345,21 +482,27 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                return -EINVAL;
        }
        while (wr) {
+               if (wr->num_sge > T3_MAX_SGE) {
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       break;
+               }
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                if (num_wrs)
-                       err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
+                       if (wr->sg_list[0].lkey)
+                               err = build_rdma_recv(qhp, wqe, wr);
+                       else
+                               err = build_zero_stag_recv(qhp, wqe, wr);
                else
                        err = -ENOMEM;
                if (err) {
                        *bad_wr = wr;
                        break;
                }
-               qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
-                       wr->wr_id;
                build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
-                              0, sizeof(struct t3_receive_wr) >> 3);
+                              0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
                PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
                     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
                     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
@@ -419,10 +562,10 @@ int iwch_bind_mw(struct ib_qp *qp,
        sgl.lkey = mw_bind->mr->lkey;
        sgl.length = mw_bind->length;
        wqe->bind.reserved = 0;
-       wqe->bind.type = T3_VA_BASED_TO;
+       wqe->bind.type = TPT_VATO;
 
        /* TBD: check perms */
-       wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
+       wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
        wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
        wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
        wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -430,7 +573,7 @@ int iwch_bind_mw(struct ib_qp *qp,
        err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
        if (err) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               return err;
+               return err;
        }
        wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
        sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
@@ -441,10 +584,9 @@ int iwch_bind_mw(struct ib_qp *qp,
        sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
        wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
        wqe->bind.mr_pagesz = page_size;
-       wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
        build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
                       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
-                               sizeof(struct t3_bind_mw_wr) >> 3);
+                      sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
        ++(qhp->wq.wptr);
        ++(qhp->wq.sq_wptr);
        spin_unlock_irqrestore(&qhp->lock, flag);
@@ -758,7 +900,8 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.rqe_count = iwch_rqes_posted(qhp);
        init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
-       init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
+       if (!qhp->ibqp.uobject)
+               init_attr.flags |= PRIV_QP;
        if (peer2peer) {
                init_attr.rtr_type = RTR_READ;
                if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
index ce1ab05..0792d93 100644
@@ -531,7 +531,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 {
        struct ehca_eq *eq = &shca->eq;
        struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
-       u64 eqe_value;
+       u64 eqe_value, ret;
        unsigned long flags;
        int eqe_cnt, i;
        int eq_empty = 0;
@@ -583,8 +583,13 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                        ehca_dbg(&shca->ib_device,
                                 "No eqe found for irq event");
                goto unlock_irq_spinlock;
-       } else if (!is_irq)
+       } else if (!is_irq) {
+               ret = hipz_h_eoi(eq->ist);
+               if (ret != H_SUCCESS)
+                       ehca_err(&shca->ib_device,
+                                "bad return code EOI -rc = %ld\n", ret);
                ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
+       }
        if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
                ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
        /* enable irq for new packets */
index 482103e..598844d 100644
@@ -923,6 +923,7 @@ static struct of_device_id ehca_device_table[] =
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, ehca_device_table);
 
 static struct of_platform_driver ehca_driver = {
        .name        = "ehca",
index f093b00..dd9bc68 100644
@@ -544,8 +544,16 @@ int ehca_post_recv(struct ib_qp *qp,
                   struct ib_recv_wr *recv_wr,
                   struct ib_recv_wr **bad_recv_wr)
 {
-       return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
-                                 qp->device, recv_wr, bad_recv_wr);
+       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+
+       /* Reject WR if QP is in RESET state */
+       if (unlikely(my_qp->state == IB_QPS_RESET)) {
+               ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
+                        my_qp->state, qp->qp_num);
+               return -EINVAL;
+       }
+
+       return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
 }
 
 int ehca_post_srq_recv(struct ib_srq *srq,
@@ -681,7 +689,7 @@ poll_cq_one_read_cqe:
        wc->dlid_path_bits = cqe->dlid;
        wc->src_qp = cqe->remote_qp_number;
        wc->wc_flags = cqe->w_completion_flags;
-       wc->imm_data = cpu_to_be32(cqe->immediate_data);
+       wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;
 
 poll_cq_one_exit0:
index 5245e13..415d3a4 100644
@@ -933,3 +933,13 @@ u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
                                       r_cb,
                                       0, 0, 0, 0);
 }
+
+u64 hipz_h_eoi(int irq)
+{
+       unsigned long xirr;
+
+       iosync();
+       xirr = (0xffULL << 24) | irq;
+
+       return plpar_hcall_norets(H_EOI, xirr);
+}
index 60ce02b..2c3c6e0 100644
@@ -260,5 +260,6 @@ u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
                      const u64 ressource_handle,
                      void *rblock,
                      unsigned long *byte_count);
+u64 hipz_h_eoi(int irq);
 
 #endif /* __HCP_IF_H__ */
index a03bd28..d385e41 100644
@@ -82,7 +82,7 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
                wc->uqueue[head].opcode = entry->opcode;
                wc->uqueue[head].vendor_err = entry->vendor_err;
                wc->uqueue[head].byte_len = entry->byte_len;
-               wc->uqueue[head].imm_data = (__u32 __force)entry->imm_data;
+               wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
                wc->uqueue[head].qp_num = entry->qp->qp_num;
                wc->uqueue[head].src_qp = entry->src_qp;
                wc->uqueue[head].wc_flags = entry->wc_flags;
index 8eee783..fb70712 100644
@@ -2228,8 +2228,8 @@ static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
                0x40000001, 0x1388, 0x15e, /* rest 0's */
                };
-       dcnt = sizeof(madpayload_start)/sizeof(madpayload_start[0]);
-       hcnt = sizeof(hdr)/sizeof(hdr[0]);
+       dcnt = ARRAY_SIZE(madpayload_start);
+       hcnt = ARRAY_SIZE(hdr);
        if (!swapped) {
                /* for maintainability, do it at runtime */
                for (i = 0; i < hcnt; i++) {
index 5f9315d..be4fc9a 100644
@@ -111,9 +111,9 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
        vendor = dd->ipath_vendorid;
-       nip->vendor_id[0] = 0;
-       nip->vendor_id[1] = vendor >> 8;
-       nip->vendor_id[2] = vendor;
+       nip->vendor_id[0] = IPATH_SRC_OUI_1;
+       nip->vendor_id[1] = IPATH_SRC_OUI_2;
+       nip->vendor_id[2] = IPATH_SRC_OUI_3;
 
        return reply(smp);
 }
index 108df66..9771052 100644
@@ -1703,11 +1703,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        case OP(SEND_LAST_WITH_IMMEDIATE):
        send_last_imm:
                if (header_in_data) {
-                       wc.imm_data = *(__be32 *) data;
+                       wc.ex.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
-                       wc.imm_data = ohdr->u.imm_data;
+                       wc.ex.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
index a4b5521..af051f7 100644
@@ -331,7 +331,7 @@ again:
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
-               wc.imm_data = wqe->wr.ex.imm_data;
+               wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                if (!ipath_get_rwqe(qp, 0))
@@ -342,7 +342,7 @@ again:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
-               wc.imm_data = wqe->wr.ex.imm_data;
+               wc.ex.imm_data = wqe->wr.ex.imm_data;
                if (!ipath_get_rwqe(qp, 1))
                        goto rnr_nak;
                /* FALLTHROUGH */
index 0596ec1..82cc588 100644
@@ -379,11 +379,11 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        case OP(SEND_LAST_WITH_IMMEDIATE):
        send_last_imm:
                if (header_in_data) {
-                       wc.imm_data = *(__be32 *) data;
+                       wc.ex.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
-                       wc.imm_data = ohdr->u.imm_data;
+                       wc.ex.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
@@ -483,11 +483,11 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
        rdma_last_imm:
                if (header_in_data) {
-                       wc.imm_data = *(__be32 *) data;
+                       wc.ex.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
-                       wc.imm_data = ohdr->u.imm_data;
+                       wc.ex.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
index 77ca8ca..36aa242 100644
@@ -96,7 +96,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
 
        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                wc.wc_flags = IB_WC_WITH_IMM;
-               wc.imm_data = swqe->wr.ex.imm_data;
+               wc.ex.imm_data = swqe->wr.ex.imm_data;
        }
 
        /*
@@ -492,14 +492,14 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                if (header_in_data) {
-                       wc.imm_data = *(__be32 *) data;
+                       wc.ex.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else
-                       wc.imm_data = ohdr->u.ud.imm_data;
+                       wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                hdrsize += sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
-               wc.imm_data = 0;
+               wc.ex.imm_data = 0;
                wc.wc_flags = 0;
        } else {
                dev->n_pkt_drops++;
index 7779165..9e23ab0 100644
@@ -1497,7 +1497,8 @@ static int ipath_query_device(struct ib_device *ibdev,
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        props->page_size_cap = PAGE_SIZE;
-       props->vendor_id = dev->dd->ipath_vendorid;
+       props->vendor_id =
+               IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
        props->vendor_part_id = dev->dd->ipath_deviceid;
        props->hw_ver = dev->dd->ipath_pcirev;
 
index 4521319..299f208 100644
@@ -663,18 +663,18 @@ repoll:
 
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
-                       wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->imm_data = cqe->immed_rss_invalid;
+                       wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
+                       wc->wc_flags    = IB_WC_WITH_IMM;
+                       wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
-                       wc->opcode   = IB_WC_RECV;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->imm_data = cqe->immed_rss_invalid;
+                       wc->opcode      = IB_WC_RECV;
+                       wc->wc_flags    = IB_WC_WITH_IMM;
+                       wc->ex.imm_data = cqe->immed_rss_invalid;
                        break;
                }
 
index 4c1e72f..cdca3a5 100644
@@ -255,7 +255,8 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,     u8 port_num,
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
-                  in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2) {
+                  in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
+                  in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
index 4d61e32..bcf5064 100644
@@ -90,7 +90,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
-               IB_DEVICE_RC_RNR_NAK_GEN;
+               IB_DEVICE_RC_RNR_NAK_GEN                |
+               IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -437,7 +438,9 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
        return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
-                                    &to_mqp(ibqp)->mqp, gid->raw);
+                                    &to_mqp(ibqp)->mqp, gid->raw,
+                                    !!(to_mqp(ibqp)->flags &
+                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
 }
 
 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
index 5cf9947..c4cf5b6 100644
@@ -101,7 +101,8 @@ struct mlx4_ib_wq {
 };
 
 enum mlx4_ib_qp_flags {
-       MLX4_IB_QP_LSO          = 1 << 0
+       MLX4_IB_QP_LSO                          = 1 << 0,
+       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 1,
 };
 
 struct mlx4_ib_qp {
index a80df22..89eb6cb 100644
@@ -129,9 +129,10 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
        int ind;
        void *buf;
        __be32 stamp;
+       struct mlx4_wqe_ctrl_seg *ctrl;
 
-       s = roundup(size, 1U << qp->sq.wqe_shift);
        if (qp->sq_max_wqes_per_wr > 1) {
+               s = roundup(size, 1U << qp->sq.wqe_shift);
                for (i = 0; i < s; i += 64) {
                        ind = (i >> qp->sq.wqe_shift) + n;
                        stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
@@ -141,7 +142,8 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
                        *wqe = stamp;
                }
        } else {
-               buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+               ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+               s = (ctrl->fence_size & 0x3f) << 4;
                for (i = 64; i < s; i += 64) {
                        wqe = buf + i;
                        *wqe = cpu_to_be32(0xffffffff);
@@ -452,19 +454,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        spin_lock_init(&qp->rq.lock);
 
        qp->state        = IB_QPS_RESET;
-       qp->atomic_rd_en = 0;
-       qp->resp_depth   = 0;
-
-       qp->rq.head         = 0;
-       qp->rq.tail         = 0;
-       qp->sq.head         = 0;
-       qp->sq.tail         = 0;
-       qp->sq_next_wqe     = 0;
-
        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-       else
-               qp->sq_signal_bits = 0;
 
        err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
        if (err)
@@ -509,6 +500,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        } else {
                qp->sq_no_prefetch = 0;
 
+               if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+                       qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+
                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
                        qp->flags |= MLX4_IB_QP_LSO;
 
@@ -682,10 +676,15 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
        struct mlx4_ib_qp *qp;
        int err;
 
-       /* We only support LSO, and only for kernel UD QPs. */
-       if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+       /*
+        * We only support LSO and multicast loopback blocking, and
+        * only for kernel UD QPs.
+        */
+       if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
+                                       IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return ERR_PTR(-EINVAL);
-       if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+
+       if (init_attr->create_flags &&
            (pd->uobject || init_attr->qp_type != IB_QPT_UD))
                return ERR_PTR(-EINVAL);
 
@@ -694,7 +693,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
-               qp = kmalloc(sizeof *qp, GFP_KERNEL);
+               qp = kzalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);
 
@@ -715,7 +714,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);
 
-               sqp = kmalloc(sizeof *sqp, GFP_KERNEL);
+               sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
                if (!sqp)
                        return ERR_PTR(-ENOMEM);
 
@@ -906,7 +905,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                               attr->path_mtu);
                        goto out;
                }
-               context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+               context->mtu_msgmax = (attr->path_mtu << 5) |
+                       ilog2(dev->dev->caps.max_msg_sz);
        }
 
        if (qp->rq.wqe_cnt)
@@ -1063,6 +1063,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                for (i = 0; i < qp->sq.wqe_cnt; ++i) {
                        ctrl = get_send_wqe(qp, i);
                        ctrl->owner_opcode = cpu_to_be32(1 << 31);
+                       if (qp->sq_max_wqes_per_wr == 1)
+                               ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);
 
                        stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
                }
@@ -1127,23 +1129,6 @@ out:
        return err;
 }
 
-static const struct ib_qp_attr mlx4_ib_qp_attr = { .port_num = 1 };
-static const int mlx4_ib_qp_attr_mask_table[IB_QPT_UD + 1] = {
-               [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
-                               IB_QP_PORT                      |
-                               IB_QP_QKEY),
-               [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
-                               IB_QP_PORT                      |
-                               IB_QP_ACCESS_FLAGS),
-               [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
-                               IB_QP_PORT                      |
-                               IB_QP_ACCESS_FLAGS),
-               [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
-                               IB_QP_QKEY),
-               [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
-                               IB_QP_QKEY),
-};
-
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
 {
@@ -1186,15 +1171,6 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
        }
 
-       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
-               err = __mlx4_ib_modify_qp(ibqp, &mlx4_ib_qp_attr,
-                                         mlx4_ib_qp_attr_mask_table[ibqp->qp_type],
-                                         IB_QPS_RESET, IB_QPS_INIT);
-               if (err)
-                       goto out;
-               cur_state = IB_QPS_INIT;
-       }
-
        err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
@@ -1865,6 +1841,13 @@ done:
 
        qp_init_attr->cap            = qp_attr->cap;
 
+       qp_init_attr->create_flags = 0;
+       if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+               qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+
+       if (qp->flags & MLX4_IB_QP_LSO)
+               qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+
 out:
        mutex_unlock(&qp->mutex);
        return err;
index a763067..c5ccc2d 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_allocator.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/errno.h>
index 4b111a8..32f6c63 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/string.h>
index e948158..cc440f9 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 
 #include <linux/jiffies.h>
@@ -128,7 +126,6 @@ static void handle_catas(struct mthca_dev *dev)
 static void poll_catas(unsigned long dev_ptr)
 {
        struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
-       unsigned long flags;
        int i;
 
        for (i = 0; i < dev->catas_err.size; ++i)
@@ -137,13 +134,8 @@ static void poll_catas(unsigned long dev_ptr)
                        return;
                }
 
-       spin_lock_irqsave(&catas_lock, flags);
-       if (!dev->catas_err.stop)
-               mod_timer(&dev->catas_err.timer,
-                         jiffies + MTHCA_CATAS_POLL_INTERVAL);
-       spin_unlock_irqrestore(&catas_lock, flags);
-
-       return;
+       mod_timer(&dev->catas_err.timer,
+                 round_jiffies(jiffies + MTHCA_CATAS_POLL_INTERVAL));
 }
 
 void mthca_start_catas_poll(struct mthca_dev *dev)
@@ -151,7 +143,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
        unsigned long addr;
 
        init_timer(&dev->catas_err.timer);
-       dev->catas_err.stop = 0;
        dev->catas_err.map  = NULL;
 
        addr = pci_resource_start(dev->pdev, 0) +
@@ -182,10 +173,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
 
 void mthca_stop_catas_poll(struct mthca_dev *dev)
 {
-       spin_lock_irq(&catas_lock);
-       dev->catas_err.stop = 1;
-       spin_unlock_irq(&catas_lock);
-
        del_timer_sync(&dev->catas_err.timer);
 
        if (dev->catas_err.map) {
index 54d230e..c33e1c5 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/completion.h>
index 8928ca4..6efd326 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_cmd.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef MTHCA_CMD_H
index afa56bf..75671f7 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_config_reg.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef MTHCA_CONFIG_REG_H
index 20401d2..d9f4735 100644
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
  */
 
 #include <linux/hardirq.h>
@@ -622,13 +620,13 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
                case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
-                       entry->imm_data = cqe->imm_etype_pkey_eec;
+                       entry->ex.imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV;
                        break;
                case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
-                       entry->imm_data = cqe->imm_etype_pkey_eec;
+                       entry->ex.imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        break;
                default:
index 7bc32f8..ee4d073 100644
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_dev.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef MTHCA_DEV_H
@@ -279,7 +277,6 @@ struct mthca_mcg_table {
 struct mthca_catas_err {
        u64                     addr;
        u32 __iomem            *map;
-       unsigned long           stop;
        u32                     size;
        struct timer_list       timer;
        struct list_head        list;
index b374dc3..14f51ef 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_doorbell.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/types.h>
index 8bde7f9..4e36aa7 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
  */
 
 #include <linux/errno.h>
index 8b7e83e..6404495 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/string.h>
index 200cf13..fb9f91b 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
  */
 
 #include <linux/module.h>
index a8ad072..3f5f948 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/string.h>
index d5862e5..1f7d1a2 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 
 #include <linux/mm.h>
index a1ab068..da9b8f9 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 
 #ifndef MTHCA_MEMFREE_H
index 820205d..8489b1e 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/slab.h>
index c1e9507..266f14e 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/errno.h>
index 605a8d5..d168c25 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_profile.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/module.h>
index e76cb62..62b009c 100644
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_profile.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef MTHCA_PROFILE_H
index be34f99..87ad889 100644
@@ -32,8 +32,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
  */
 
 #include <rdma/ib_smi.h>
index 934bf95..c621f87 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef MTHCA_PROVIDER_H
index 09dc361..f5081bf 100644
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
  */
 
 #include <linux/string.h>
@@ -850,23 +848,6 @@ out:
        return err;
 }
 
-static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };
-static const int dummy_init_attr_mask[] = {
-       [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
-                       IB_QP_PORT                      |
-                       IB_QP_QKEY),
-       [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
-                       IB_QP_PORT                      |
-                       IB_QP_ACCESS_FLAGS),
-       [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
-                       IB_QP_PORT                      |
-                       IB_QP_ACCESS_FLAGS),
-       [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
-                       IB_QP_QKEY),
-       [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
-                       IB_QP_QKEY),
-};
-
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
 {
@@ -928,15 +909,6 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                goto out;
        }
 
-       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
-               err = __mthca_modify_qp(ibqp, &dummy_init_attr,
-                                       dummy_init_attr_mask[ibqp->qp_type],
-                                       IB_QPS_RESET, IB_QPS_INIT);
-               if (err)
-                       goto out;
-               cur_state = IB_QPS_INIT;
-       }
-
        err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
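
Together with the dummy_init_attr tables deleted above, this drops mthca's private two-step RESET->INIT->ERR sequence from mthca_modify_qp(). From a verbs consumer's point of view nothing changes about how a QP is pushed into the error state; a minimal sketch, with qp assumed to come from an earlier ib_create_qp():

        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR,
        };
        int err;

        /* outstanding work requests complete with flush errors after this */
        err = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (err)
                printk(KERN_ERR "failed to move QP to ERR: %d\n", err);
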
@@ -1277,10 +1249,10 @@ static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
                return -EINVAL;
 
        /*
-        * For MLX transport we need 2 extra S/G entries:
+        * For MLX transport we need 2 extra send gather entries:
         * one for the header and one for the checksum at the end
         */
-       if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
+       if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
                return -EINVAL;
 
        if (mthca_is_memfree(dev)) {
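
The corrected check applies the MLX-transport reserve to the send gather list, matching the comment: the driver itself adds the header and the trailing checksum as two extra gather entries on every special-QP send, while receives are unaffected. A worked example of the limit, assuming a device that reports dev->limits.max_sg == 32:

        /*
         * max_sg == 32, MLX transport:
         *   consumer may request  cap->max_send_sge <= 30
         *   driver reserves       1 SGE for the constructed header
         *                         1 SGE for the checksum at the end
         * so with max_sg == 32 the check rejects max_send_sge == 31,
         * which the old max_recv_sge test would have let through.
         */
        if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
                return -EINVAL;
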
index 91934f2..acb6817 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_reset.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/init.h>
index a5ffff6..4fabe62 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
  */
 
 #include <linux/slab.h>
index 8b72848..ca5900c 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 
 #include <asm/page.h>          /* PAGE_SHIFT */
index e1262c9..5fe56e8 100644
@@ -29,7 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
  */
 
 #ifndef MTHCA_USER_H
index b3551a8..341a5ae 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
  */
 
 #ifndef MTHCA_WQE_H
index a4e9269..d2884e7 100644
@@ -328,7 +328,7 @@ void nes_rem_ref(struct ib_qp *ibqp)
                set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
                u64temp = (u64)nesqp->nesqp_context_pbase;
                set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
-               nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+               nes_post_cqp_request(nesdev, cqp_request);
        }
 }
 
index 61b46e9..39bd897 100644
@@ -94,9 +94,6 @@
 
 #define MAX_DPC_ITERATIONS               128
 
-#define NES_CQP_REQUEST_NO_DOORBELL_RING 0
-#define NES_CQP_REQUEST_RING_DOORBELL    1
-
 #define NES_DRV_OPT_ENABLE_MPA_VER_0     0x00000001
 #define NES_DRV_OPT_DISABLE_MPA_CRC      0x00000002
 #define NES_DRV_OPT_DISABLE_FIRST_WRITE  0x00000004
@@ -538,7 +535,11 @@ void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
 void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16, u16);
 void nes_read_10G_phy_reg(struct nes_device *, u8, u8, u16);
 struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
-void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
+void nes_free_cqp_request(struct nes_device *nesdev,
+                         struct nes_cqp_request *cqp_request);
+void nes_put_cqp_request(struct nes_device *nesdev,
+                        struct nes_cqp_request *cqp_request);
+void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *);
 int nes_arp_table(struct nes_device *, u32, u8 *, u32);
 void nes_mh_fix(unsigned long);
 void nes_clc(unsigned long);
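
The new nes_free_cqp_request()/nes_put_cqp_request() helpers (defined in the nes_hw.c hunk further down) replace the open-coded "drop refcount, then kfree or return to cqp_avail_reqs" blocks that the rest of this diff deletes, and nes_post_cqp_request() loses its ring_doorbell argument because every caller passed NES_CQP_REQUEST_RING_DOORBELL. A sketch of the resulting caller pattern for a request that waits for completion; example_cqp_op() is a hypothetical wrapper, the struct members come from this diff, and NES_EVENT_TIMEOUT is assumed to be the driver's usual wait timeout:

        static int example_cqp_op(struct nes_device *nesdev)
        {
                struct nes_cqp_request *cqp_request;
                struct nes_hw_cqp_wqe *cqp_wqe;
                u16 major_code;
                int ret;

                cqp_request = nes_get_cqp_request(nesdev);
                if (!cqp_request)
                        return -ENOMEM;
                cqp_request->waiting = 1;
                cqp_wqe = &cqp_request->cqp_wqe;
                /* ... fill in the CQP WQE here ... */

                atomic_set(&cqp_request->refcount, 2); /* one ref for us, one for the handler */
                nes_post_cqp_request(nesdev, cqp_request);

                ret = wait_event_timeout(cqp_request->waitq,
                                         cqp_request->request_done != 0,
                                         NES_EVENT_TIMEOUT);
                major_code = cqp_request->major_code;
                nes_put_cqp_request(nesdev, cqp_request);       /* drop our reference */

                if (!ret)
                        return -ETIME;
                return major_code ? -EIO : 0;
        }
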
index 9a4b40f..6aa531d 100644
@@ -1603,7 +1603,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
                        return NULL;
                }
 
-               memset(listener, 0, sizeof(struct nes_cm_listener));
                listener->loc_addr = htonl(cm_info->loc_addr);
                listener->loc_port = htons(cm_info->loc_port);
                listener->reused_node = 0;
index d3278f1..85f26d1 100644
@@ -398,7 +398,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
        nesadapter->base_pd = 1;
 
        nesadapter->device_cap_flags =
-               IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;
+               IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
 
        nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
                        [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
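
IB_DEVICE_ZERO_STAG is renamed to IB_DEVICE_LOCAL_DMA_LKEY here, following the rename of that capability bit in the core during this merge window. A hedged sketch of how a consumer would test it before relying on the device-wide DMA L_Key; 'device' and 'pd' are assumed from context, and the local_dma_lkey field name is an assumption from the same core series (not shown in this section):

        struct ib_device_attr attr;
        u32 lkey;

        if (!ib_query_device(device, &attr) &&
            (attr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                lkey = device->local_dma_lkey;  /* one L_Key valid for all local DMA */
        } else {
                struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);

                lkey = mr->lkey;                /* error handling elided */
        }
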
@@ -2710,39 +2710,11 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
                                        barrier();
                                        cqp_request->request_done = 1;
                                        wake_up(&cqp_request->waitq);
-                                       if (atomic_dec_and_test(&cqp_request->refcount)) {
-                                               nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
-                                                               cqp_request,
-                                                               le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
-                                               if (cqp_request->dynamic) {
-                                                       kfree(cqp_request);
-                                               } else {
-                                                       spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                                                       list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                                                       spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                                               }
-                                       }
-                               } else if (cqp_request->callback) {
-                                       /* Envoke the callback routine */
-                                       cqp_request->cqp_callback(nesdev, cqp_request);
-                                       if (cqp_request->dynamic) {
-                                               kfree(cqp_request);
-                                       } else {
-                                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                                       }
+                                       nes_put_cqp_request(nesdev, cqp_request);
                                } else {
-                                       nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
-                                                       cqp_request,
-                                                       le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f);
-                                       if (cqp_request->dynamic) {
-                                               kfree(cqp_request);
-                                       } else {
-                                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                                       }
+                                       if (cqp_request->callback)
+                                               cqp_request->cqp_callback(nesdev, cqp_request);
+                                       nes_free_cqp_request(nesdev, cqp_request);
                                }
                        } else {
                                wake_up(&nesdev->cqp.waitq);
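
With the helpers in place, the completion handler's tail shrinks to a nes_put_cqp_request() when somebody is waiting, and to "run the callback, then free" otherwise. The reference-count convention implied by the converted callers in this diff, plus the fire-and-forget case, as a sketch:

        /*
         * Convention used by the converted callers:
         *   refcount == 2  the caller waits on cqp_request->waitq and later
         *                  calls nes_put_cqp_request() itself; the handler
         *                  above drops the other reference.
         *   refcount == 1  fire-and-forget (e.g. the ARP cache hunk below);
         *                  the handler's put/free is what releases it.
         */
        atomic_set(&cqp_request->refcount, 1);
        nes_post_cqp_request(nesdev, cqp_request);
        /* no wait and no put here: ownership passed to the handler */
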
@@ -3149,7 +3121,6 @@ int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
 {
        struct nes_device *nesdev = nesvnic->nesdev;
        struct nes_hw_cqp_wqe *cqp_wqe;
-       unsigned long flags;
        struct nes_cqp_request *cqp_request;
        int ret = 0;
        u16 major_code;
@@ -3176,7 +3147,7 @@ int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
        nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n");
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        if (add_port == NES_MANAGE_APBVT_ADD)
                ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
@@ -3184,15 +3155,9 @@ int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
        nes_debug(NES_DBG_QP, "Completed, ret=%u,  CQP Major:Minor codes = 0x%04X:0x%04X\n",
                        ret, cqp_request->major_code, cqp_request->minor_code);
        major_code = cqp_request->major_code;
-       if (atomic_dec_and_test(&cqp_request->refcount)) {
-               if (cqp_request->dynamic) {
-                       kfree(cqp_request);
-               } else {
-                       spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                       list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                       spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-               }
-       }
+
+       nes_put_cqp_request(nesdev, cqp_request);
+
        if (!ret)
                return -ETIME;
        else if (major_code)
@@ -3252,7 +3217,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
                        nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
 
        atomic_set(&cqp_request->refcount, 1);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 }
 
 
@@ -3262,7 +3227,6 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
 void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
                u32 which_wq, u32 wait_completion)
 {
-       unsigned long flags;
        struct nes_cqp_request *cqp_request;
        struct nes_hw_cqp_wqe *cqp_wqe;
        int ret;
@@ -3285,7 +3249,7 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
                        cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
        cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
 
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        if (wait_completion) {
                /* Wait for CQP */
@@ -3294,14 +3258,6 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
                nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u,"
                                " CQP Major:Minor codes = 0x%04X:0x%04X\n",
                                ret, cqp_request->major_code, cqp_request->minor_code);
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
+               nes_put_cqp_request(nesdev, cqp_request);
        }
 }
index 745bf94..7b81e0a 100644
@@ -1172,7 +1172,7 @@ struct nes_vnic {
        u32    mcrq_qp_id;
        struct nes_ucontext *mcrq_ucontext;
        struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
-       void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *, int);
+       void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *);
        int (*mcrq_mcast_filter)( struct nes_vnic* nesvnic, __u8* dmi_addr );
        struct net_device_stats netstats;
        /* used to put the netdev on the adapters logical port list */
index fe83d1b..fb8cbd7 100644
@@ -567,12 +567,36 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
        return cqp_request;
 }
 
+void nes_free_cqp_request(struct nes_device *nesdev,
+                         struct nes_cqp_request *cqp_request)
+{
+       unsigned long flags;
+
+       nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
+                 cqp_request,
+                 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f);
+
+       if (cqp_request->dynamic) {
+               kfree(cqp_request);
+       } else {
+               spin_lock_irqsave(&nesdev->cqp.lock, flags);
+               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+       }
+}
+
+void nes_put_cqp_request(struct nes_device *nesdev,
+                        struct nes_cqp_request *cqp_request)
+{
+       if (atomic_dec_and_test(&cqp_request->refcount))
+               nes_free_cqp_request(nesdev, cqp_request);
+}
 
 /**
  * nes_post_cqp_request
  */
 void nes_post_cqp_request(struct nes_device *nesdev,
-               struct nes_cqp_request *cqp_request, int ring_doorbell)
+                         struct nes_cqp_request *cqp_request)
 {
        struct nes_hw_cqp_wqe *cqp_wqe;
        unsigned long flags;
@@ -600,10 +624,9 @@ void nes_post_cqp_request(struct nes_device *nesdev,
                                nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
                                cqp_request->waiting, atomic_read(&cqp_request->refcount));
                barrier();
-               if (ring_doorbell) {
-                       /* Ring doorbell (1 WQEs) */
-                       nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
-               }
+
+               /* Ring doorbell (1 WQEs) */
+               nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
 
                barrier();
        } else {
index d617da9..e3939d1 100644
@@ -55,7 +55,6 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
  * nes_alloc_mw
  */
 static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
-       unsigned long flags;
        struct nes_pd *nespd = to_nespd(ibpd);
        struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
        struct nes_device *nesdev = nesvnic->nesdev;
@@ -119,7 +118,7 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
        set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
@@ -128,15 +127,7 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
                        " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
                        stag, ret, cqp_request->major_code, cqp_request->minor_code);
        if ((!ret) || (cqp_request->major_code)) {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
+               nes_put_cqp_request(nesdev, cqp_request);
                kfree(nesmr);
                nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
                if (!ret) {
@@ -144,17 +135,8 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
                } else {
                        return ERR_PTR(-ENOMEM);
                }
-       } else {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
        }
+       nes_put_cqp_request(nesdev, cqp_request);
 
        nesmr->ibmw.rkey = stag;
        nesmr->mode = IWNES_MEMREG_TYPE_MW;
@@ -178,7 +160,6 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
        struct nes_hw_cqp_wqe *cqp_wqe;
        struct nes_cqp_request *cqp_request;
        int err = 0;
-       unsigned long flags;
        int ret;
 
        /* Deallocate the window with the adapter */
@@ -194,7 +175,7 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
        set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
@@ -204,32 +185,12 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
        nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u,"
                        " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
                        ret, cqp_request->major_code, cqp_request->minor_code);
-       if ((!ret) || (cqp_request->major_code)) {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
-               if (!ret) {
-                       err = -ETIME;
-               } else {
-                       err = -EIO;
-               }
-       } else {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
-       }
+       if (!ret)
+               err = -ETIME;
+       else if (cqp_request->major_code)
+               err = -EIO;
+
+       nes_put_cqp_request(nesdev, cqp_request);
 
        nes_free_resource(nesadapter, nesadapter->allocated_mrs,
                        (ibmw->rkey & 0x0fffff00) >> 8);
@@ -516,7 +477,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
@@ -526,29 +487,11 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
                        stag, ret, cqp_request->major_code, cqp_request->minor_code);
 
        if ((!ret) || (cqp_request->major_code)) {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
+               nes_put_cqp_request(nesdev, cqp_request);
                ret = (!ret) ? -ETIME : -EIO;
                goto failed_leaf_vpbl_pages_alloc;
-       } else {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
        }
-
+       nes_put_cqp_request(nesdev, cqp_request);
        nesfmr->nesmr.ibfmr.lkey = stag;
        nesfmr->nesmr.ibfmr.rkey = stag;
        nesfmr->attr = *ibfmr_attr;
@@ -1474,7 +1417,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                        set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
 
                        atomic_set(&cqp_request->refcount, 2);
-                       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+                       nes_post_cqp_request(nesdev, cqp_request);
 
                        /* Wait for CQP */
                        nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
@@ -1487,15 +1430,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                                        nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
                                        cqp_request->major_code, cqp_request->minor_code);
                        if ((!ret) || (cqp_request->major_code)) {
-                               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                                       if (cqp_request->dynamic) {
-                                               kfree(cqp_request);
-                                       } else {
-                                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                                       }
-                               }
+                               nes_put_cqp_request(nesdev, cqp_request);
                                nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
                                nes_free_qp_mem(nesdev, nesqp,virt_wqs);
                                kfree(nesqp->allocated_buffer);
@@ -1504,18 +1439,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                                } else {
                                        return ERR_PTR(-EIO);
                                }
-                       } else {
-                               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                                       if (cqp_request->dynamic) {
-                                               kfree(cqp_request);
-                                       } else {
-                                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                                       }
-                               }
                        }
 
+                       nes_put_cqp_request(nesdev, cqp_request);
+
                        if (ibpd->uobject) {
                                uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
                                uresp.actual_sq_size = sq_size;
@@ -1817,7 +1744,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                        cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
@@ -1827,32 +1754,15 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
        nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n",
                        nescq->hw_cq.cq_number, ret);
        if ((!ret) || (cqp_request->major_code)) {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
+               nes_put_cqp_request(nesdev, cqp_request);
                if (!context)
                        pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
                                        nescq->hw_cq.cq_pbase);
                nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
                kfree(nescq);
                return ERR_PTR(-EIO);
-       } else {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
        }
+       nes_put_cqp_request(nesdev, cqp_request);
 
        if (context) {
                /* free the nespbl */
@@ -1931,7 +1841,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
                (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
        nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
@@ -1942,37 +1852,18 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
                        " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
                        nescq->hw_cq.cq_number, ret, cqp_request->major_code,
                        cqp_request->minor_code);
-       if ((!ret) || (cqp_request->major_code)) {
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
-               if (!ret) {
-                       nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
+       if (!ret) {
+               nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
                                        nescq->hw_cq.cq_number);
-                       ret = -ETIME;
-               } else {
-                       nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
+               ret = -ETIME;
+       } else if (cqp_request->major_code) {
+               nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
                                        nescq->hw_cq.cq_number);
-                       ret = -EIO;
-               }
+               ret = -EIO;
        } else {
                ret = 0;
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
        }
+       nes_put_cqp_request(nesdev, cqp_request);
 
        if (nescq->cq_mem_size)
                pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
@@ -2096,7 +1987,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
        barrier();
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
@@ -2105,15 +1996,8 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
                        " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
                        stag, ret, cqp_request->major_code, cqp_request->minor_code);
        major_code = cqp_request->major_code;
-       if (atomic_dec_and_test(&cqp_request->refcount)) {
-               if (cqp_request->dynamic) {
-                       kfree(cqp_request);
-               } else {
-                       spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                       list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                       spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-               }
-       }
+       nes_put_cqp_request(nesdev, cqp_request);
+
        if (!ret)
                return -ETIME;
        else if (major_code)
@@ -2754,7 +2638,7 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
        set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
@@ -2771,15 +2655,9 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
 
        major_code = cqp_request->major_code;
        minor_code = cqp_request->minor_code;
-       if (atomic_dec_and_test(&cqp_request->refcount)) {
-               if (cqp_request->dynamic) {
-                       kfree(cqp_request);
-               } else {
-                       spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                       list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                       spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-               }
-       }
+
+       nes_put_cqp_request(nesdev, cqp_request);
+
        if (!ret) {
                nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag,"
                                " ib_mr=%p, rkey = 0x%08X\n",
@@ -2904,7 +2782,6 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
        /* struct iw_cm_id *cm_id = nesqp->cm_id; */
        /* struct iw_cm_event cm_event; */
        struct nes_cqp_request *cqp_request;
-       unsigned long flags;
        int ret;
        u16 major_code;
 
@@ -2932,7 +2809,7 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
        set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
 
        atomic_set(&cqp_request->refcount, 2);
-       nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+       nes_post_cqp_request(nesdev, cqp_request);
 
        /* Wait for CQP */
        if (wait_completion) {
@@ -2950,15 +2827,9 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
                                        nesqp->hwqp.qp_id, cqp_request->major_code,
                                        cqp_request->minor_code, next_iwarp_state);
                }
-               if (atomic_dec_and_test(&cqp_request->refcount)) {
-                       if (cqp_request->dynamic) {
-                               kfree(cqp_request);
-                       } else {
-                               spin_lock_irqsave(&nesdev->cqp.lock, flags);
-                               list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
-                               spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
-                       }
-               }
+
+               nes_put_cqp_request(nesdev, cqp_request);
+
                if (!ret)
                        return -ETIME;
                else if (major_code)
index 1f76bad..691525c 100644
@@ -1,6 +1,7 @@
 config INFINIBAND_IPOIB
        tristate "IP-over-InfiniBand"
        depends on NETDEVICES && INET && (IPV6 || IPV6=n)
+       select INET_LRO
        ---help---
          Support for the IP-over-InfiniBand protocol (IPoIB). This
          transports IP packets over InfiniBand so you can use your IB
index ca126fc..b0ffc9a 100644
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib.h 1358 2004-12-17 22:00:11Z roland $
  */
 
 #ifndef _IPOIB_H
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
+#include <linux/inet_lro.h>
 
 /* constants */
 
+enum ipoib_flush_level {
+       IPOIB_FLUSH_LIGHT,
+       IPOIB_FLUSH_NORMAL,
+       IPOIB_FLUSH_HEAVY
+};
+
 enum {
        IPOIB_ENCAP_LEN           = 4,
 
@@ -65,8 +70,8 @@ enum {
        IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
        IPOIB_CM_HEAD_SIZE        = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
        IPOIB_CM_RX_SG            = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
-       IPOIB_RX_RING_SIZE        = 128,
-       IPOIB_TX_RING_SIZE        = 64,
+       IPOIB_RX_RING_SIZE        = 256,
+       IPOIB_TX_RING_SIZE        = 128,
        IPOIB_MAX_QUEUE_SIZE      = 8192,
        IPOIB_MIN_QUEUE_SIZE      = 2,
        IPOIB_CM_MAX_CONN_QP      = 4096,
@@ -84,7 +89,6 @@ enum {
        IPOIB_FLAG_SUBINTERFACE   = 5,
        IPOIB_MCAST_RUN           = 6,
        IPOIB_STOP_REAPER         = 7,
-       IPOIB_MCAST_STARTED       = 8,
        IPOIB_FLAG_ADMIN_CM       = 9,
        IPOIB_FLAG_UMCAST         = 10,
        IPOIB_FLAG_CSUM           = 11,
@@ -96,7 +100,11 @@ enum {
        IPOIB_MCAST_FLAG_BUSY     = 2,  /* joining or already joined */
        IPOIB_MCAST_FLAG_ATTACHED = 3,
 
+       IPOIB_MAX_LRO_DESCRIPTORS = 8,
+       IPOIB_LRO_MAX_AGGR        = 64,
+
        MAX_SEND_CQE              = 16,
+       IPOIB_CM_COPYBREAK        = 256,
 };
 
 #define        IPOIB_OP_RECV   (1ul << 31)
@@ -149,6 +157,11 @@ struct ipoib_tx_buf {
        u64             mapping[MAX_SKB_FRAGS + 1];
 };
 
+struct ipoib_cm_tx_buf {
+       struct sk_buff *skb;
+       u64             mapping;
+};
+
 struct ib_cm_id;
 
 struct ipoib_cm_data {
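
Connected-mode sends only ever map the linear skb data, so giving the CM tx_ring its own ipoib_cm_tx_buf avoids carrying the MAX_SKB_FRAGS + 1 mapping array of ipoib_tx_buf per slot. Rough arithmetic, assuming 4 KB pages (MAX_SKB_FRAGS == 18) and a 64-bit build:

        /*
         * struct ipoib_tx_buf    : skb pointer + 19 u64 mappings ~= 160 bytes
         * struct ipoib_cm_tx_buf : skb pointer +  1 u64 mapping  ~=  16 bytes
         *
         * At the new ipoib_sendq_size default of 128 (see the ring-size hunk
         * above), each connected-mode tx_ring shrinks from ~20 KB to ~2 KB.
         */
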
@@ -207,7 +220,7 @@ struct ipoib_cm_tx {
        struct net_device   *dev;
        struct ipoib_neigh  *neigh;
        struct ipoib_path   *path;
-       struct ipoib_tx_buf *tx_ring;
+       struct ipoib_cm_tx_buf *tx_ring;
        unsigned             tx_head;
        unsigned             tx_tail;
        unsigned long        flags;
@@ -249,6 +262,11 @@ struct ipoib_ethtool_st {
        u16     max_coalesced_frames;
 };
 
+struct ipoib_lro {
+       struct net_lro_mgr lro_mgr;
+       struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
+};
+
 /*
  * Device private locking: tx_lock protects members used in TX fast
  * path (and we use LLTX so upper layers don't do extra locking).
@@ -264,7 +282,6 @@ struct ipoib_dev_priv {
 
        unsigned long flags;
 
-       struct mutex mcast_mutex;
        struct mutex vlan_mutex;
 
        struct rb_root  path_tree;
@@ -276,10 +293,11 @@ struct ipoib_dev_priv {
 
        struct delayed_work pkey_poll_task;
        struct delayed_work mcast_task;
-       struct work_struct flush_task;
+       struct work_struct flush_light;
+       struct work_struct flush_normal;
+       struct work_struct flush_heavy;
        struct work_struct restart_task;
        struct delayed_work ah_reap_task;
-       struct work_struct pkey_event_task;
 
        struct ib_device *ca;
        u8                port;
@@ -335,6 +353,8 @@ struct ipoib_dev_priv {
        int     hca_caps;
        struct ipoib_ethtool_st ethtool;
        struct timer_list poll_timer;
+
+       struct ipoib_lro lro;
 };
 
 struct ipoib_ah {
@@ -359,6 +379,7 @@ struct ipoib_path {
 
        struct rb_node        rb_node;
        struct list_head      list;
+       int                   valid;
 };
 
 struct ipoib_neigh {
@@ -423,11 +444,14 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);
 
+void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(struct work_struct *work);
+void ipoib_ib_dev_flush_light(struct work_struct *work);
+void ipoib_ib_dev_flush_normal(struct work_struct *work);
+void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
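
Splitting the single flush_task/pkey_event_task pair into light/normal/heavy work structs lets the IB event handler choose how much rebuilding each event needs; the __ipoib_ib_dev_flush() hunks near the end of this diff show what each level does. A sketch of the dispatch this enables, assuming the usual ipoib_event() handler and ipoib_workqueue (that handler is not part of this section, so the exact event mapping is an assumption):

        static void ipoib_event(struct ib_event_handler *handler,
                                struct ib_event *record)
        {
                struct ipoib_dev_priv *priv =
                        container_of(handler, struct ipoib_dev_priv, event_handler);

                if (record->device != priv->ca ||
                    record->element.port_num != priv->port)
                        return;

                if (record->event == IB_EVENT_SM_CHANGE ||
                    record->event == IB_EVENT_CLIENT_REREGISTER)
                        /* light: flush mcast groups, mark paths invalid */
                        queue_work(ipoib_workqueue, &priv->flush_light);
                else if (record->event == IB_EVENT_PORT_ERR ||
                         record->event == IB_EVENT_PORT_ACTIVE ||
                         record->event == IB_EVENT_LID_CHANGE)
                        /* normal: additionally take the IB side down and up */
                        queue_work(ipoib_workqueue, &priv->flush_normal);
                else if (record->event == IB_EVENT_PKEY_CHANGE)
                        /* heavy: full stop/open, picking up the new P_Key index */
                        queue_work(ipoib_workqueue, &priv->flush_heavy);
        }
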
 
@@ -466,9 +490,7 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 #endif
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
-                      union ib_gid *mgid);
-int ipoib_mcast_detach(struct net_device *dev, u16 mlid,
-                      union ib_gid *mgid);
+                      union ib_gid *mgid, int set_qkey);
 
 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
index 97e67d3..0f2d304 100644
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 
 #include <rdma/ib_cm.h>
@@ -113,18 +111,20 @@ static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
 }
 
 static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
-                                       struct ipoib_cm_rx *rx, int id)
+                                       struct ipoib_cm_rx *rx,
+                                       struct ib_recv_wr *wr,
+                                       struct ib_sge *sge, int id)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;
 
-       priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
+       wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
 
        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
-               priv->cm.rx_sge[i].addr = rx->rx_ring[id].mapping[i];
+               sge[i].addr = rx->rx_ring[id].mapping[i];
 
-       ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
+       ret = ib_post_recv(rx->qp, wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
@@ -322,10 +322,33 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
        return 0;
 }
 
+static void ipoib_cm_init_rx_wr(struct net_device *dev,
+                               struct ib_recv_wr *wr,
+                               struct ib_sge *sge)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       int i;
+
+       for (i = 0; i < priv->cm.num_frags; ++i)
+               sge[i].lkey = priv->mr->lkey;
+
+       sge[0].length = IPOIB_CM_HEAD_SIZE;
+       for (i = 1; i < priv->cm.num_frags; ++i)
+               sge[i].length = PAGE_SIZE;
+
+       wr->next    = NULL;
+       wr->sg_list = priv->cm.rx_sge;
+       wr->num_sge = priv->cm.num_frags;
+}
+
 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
                                   struct ipoib_cm_rx *rx)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct {
+               struct ib_recv_wr wr;
+               struct ib_sge sge[IPOIB_CM_RX_SG];
+       } *t;
        int ret;
        int i;
 
@@ -333,6 +356,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
        if (!rx->rx_ring)
                return -ENOMEM;
 
+       t = kmalloc(sizeof *t, GFP_KERNEL);
+       if (!t) {
+               ret = -ENOMEM;
+               goto err_free;
+       }
+
+       ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
+
        spin_lock_irq(&priv->lock);
 
        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
@@ -351,8 +382,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                                ret = -ENOMEM;
                                goto err_count;
-                       }
-               ret = ipoib_cm_post_receive_nonsrq(dev, rx, i);
+               }
+               ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
                if (ret) {
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
                                   "failed for buf %d\n", i);
@@ -363,6 +394,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 
        rx->recv_count = ipoib_recvq_size;
 
+       kfree(t);
+
        return 0;
 
 err_count:
@@ -371,6 +404,7 @@ err_count:
        spin_unlock_irq(&priv->lock);
 
 err_free:
+       kfree(t);
        ipoib_cm_free_rx_ring(dev, rx->rx_ring);
 
        return ret;
@@ -525,6 +559,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        u64 mapping[IPOIB_CM_RX_SG];
        int frags;
        int has_srq;
+       struct sk_buff *small_skb;
 
        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -579,6 +614,23 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                }
        }
 
+       if (wc->byte_len < IPOIB_CM_COPYBREAK) {
+               int dlen = wc->byte_len;
+
+               small_skb = dev_alloc_skb(dlen + 12);
+               if (small_skb) {
+                       skb_reserve(small_skb, 12);
+                       ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
+                                                  dlen, DMA_FROM_DEVICE);
+                       skb_copy_from_linear_data(skb, small_skb->data, dlen);
+                       ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
+                                                     dlen, DMA_FROM_DEVICE);
+                       skb_put(small_skb, dlen);
+                       skb = small_skb;
+                       goto copied;
+               }
+       }
+
        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
 
@@ -601,6 +653,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
 
+copied:
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);
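
The IPOIB_CM_COPYBREAK path copies small receives into a right-sized skb so the large connected-mode ring buffer never has to be replaced for them. The numbers behind the threshold, with 4 KB pages assumed:

        /*
         * IPOIB_CM_BUF_SIZE  = IPOIB_CM_MTU + IPOIB_ENCAP_LEN ~= 64 KB
         * IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE  ~= 4084 bytes
         *
         * For a frame shorter than IPOIB_CM_COPYBREAK (256 bytes) the old
         * path still allocated and DMA-mapped a fresh ~4 KB head buffer
         * before it could hand the filled one up the stack; the copybreak
         * path does a sub-256-byte memcpy bracketed by two ib_dma_sync
         * calls and reposts the original, still-mapped buffer unchanged.
         */
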
@@ -620,7 +673,10 @@ repost:
                        ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
                                   "for buf %d\n", wr_id);
        } else {
-               if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, wr_id))) {
+               if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
+                                                         &priv->cm.rx_wr,
+                                                         priv->cm.rx_sge,
+                                                         wr_id))) {
                        --p->recv_count;
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
                                   "for buf %d\n", wr_id);
@@ -647,7 +703,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ipoib_tx_buf *tx_req;
+       struct ipoib_cm_tx_buf *tx_req;
        u64 addr;
 
        if (unlikely(skb->len > tx->mtu)) {
@@ -678,7 +734,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                return;
        }
 
-       tx_req->mapping[0] = addr;
+       tx_req->mapping = addr;
 
        if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                               addr, skb->len))) {
@@ -703,7 +759,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-       struct ipoib_tx_buf *tx_req;
+       struct ipoib_cm_tx_buf *tx_req;
        unsigned long flags;
 
        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -717,7 +773,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &tx->tx_ring[wr_id];
 
-       ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
+       ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
 
        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
@@ -1087,7 +1143,7 @@ err_tx:
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-       struct ipoib_tx_buf *tx_req;
+       struct ipoib_cm_tx_buf *tx_req;
        unsigned long flags;
        unsigned long begin;
 
@@ -1115,7 +1171,7 @@ timeout:
 
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-               ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
+               ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
                                    DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
@@ -1384,7 +1440,9 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                ipoib_warn(priv, "enabling connected mode "
                           "will cause multicast packet drops\n");
 
+               rtnl_lock();
                dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+               rtnl_unlock();
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
                ipoib_flush_paths(dev);
@@ -1393,14 +1451,16 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 
        if (!strcmp(buf, "datagram\n")) {
                clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-               dev->mtu = min(priv->mcast_mtu, dev->mtu);
-               ipoib_flush_paths(dev);
 
+               rtnl_lock();
                if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
                        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                        if (priv->hca_caps & IB_DEVICE_UD_TSO)
                                dev->features |= NETIF_F_TSO;
                }
+               dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+               rtnl_unlock();
+               ipoib_flush_paths(dev);
 
                return count;
        }
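
The mode-switch handler now wraps its feature-flag updates in rtnl_lock()/rtnl_unlock() and calls dev_set_mtu() instead of assigning dev->mtu directly, so MTU notifiers fire and the features change under the lock the core expects. A minimal sketch of that pattern for a driver sysfs store handler that is not already called under RTNL (names and values are illustrative):

        static ssize_t store_example(struct device *d, struct device_attribute *attr,
                                     const char *buf, size_t count)
        {
                struct net_device *dev = to_net_dev(d);

                rtnl_lock();
                dev->features &= ~NETIF_F_TSO;  /* feature bits are RTNL-protected */
                dev_set_mtu(dev, 2044);         /* example value; fires NETDEV_CHANGEMTU */
                rtnl_unlock();

                return count;
        }
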
@@ -1485,15 +1545,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
                priv->cm.num_frags  = IPOIB_CM_RX_SG;
        }
 
-       for (i = 0; i < priv->cm.num_frags; ++i)
-               priv->cm.rx_sge[i].lkey = priv->mr->lkey;
-
-       priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
-       for (i = 1; i < priv->cm.num_frags; ++i)
-               priv->cm.rx_sge[i].length = PAGE_SIZE;
-       priv->cm.rx_wr.next = NULL;
-       priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
-       priv->cm.rx_wr.num_sge = priv->cm.num_frags;
+       ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
 
        if (ipoib_cm_has_srq(dev)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
index 10279b7..66af5c1 100644
@@ -86,11 +86,57 @@ static int ipoib_set_coalesce(struct net_device *dev,
        return 0;
 }
 
+static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
+       "LRO aggregated", "LRO flushed",
+       "LRO avg aggr", "LRO no desc"
+};
+
+static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
+               break;
+       }
+}
+
+static int ipoib_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(ipoib_stats_keys);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+                               struct ethtool_stats *stats, uint64_t *data)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       int index = 0;
+
+       /* Get LRO statistics */
+       data[index++] = priv->lro.lro_mgr.stats.aggregated;
+       data[index++] = priv->lro.lro_mgr.stats.flushed;
+       if (priv->lro.lro_mgr.stats.flushed)
+               data[index++] = priv->lro.lro_mgr.stats.aggregated /
+                               priv->lro.lro_mgr.stats.flushed;
+       else
+               data[index++] = 0;
+       data[index++] = priv->lro.lro_mgr.stats.no_desc;
+}
+
 static const struct ethtool_ops ipoib_ethtool_ops = {
        .get_drvinfo            = ipoib_get_drvinfo,
        .get_tso                = ethtool_op_get_tso,
        .get_coalesce           = ipoib_get_coalesce,
        .set_coalesce           = ipoib_set_coalesce,
+       .get_flags              = ethtool_op_get_flags,
+       .set_flags              = ethtool_op_set_flags,
+       .get_strings            = ipoib_get_strings,
+       .get_sset_count         = ipoib_get_sset_count,
+       .get_ethtool_stats      = ipoib_get_ethtool_stats,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
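
The ipoib_ethtool.c hunk above wires three new ethtool callbacks together: get_sset_count() reports how many ETH_SS_STATS strings exist, get_strings() copies the ipoib_stats_keys names, and get_ethtool_stats() fills a matching array of u64 values, guarding the "LRO avg aggr" division against a zero flush count. Below is a minimal standalone sketch of that counter layout; the lro_counters struct and the sample values are hypothetical, only the indexing convention mirrors the patch.

#include <stdio.h>
#include <stdint.h>

#define ETH_GSTRING_LEN 32      /* same value as the kernel's definition */

static const char stats_keys[][ETH_GSTRING_LEN] = {
        "LRO aggregated", "LRO flushed",
        "LRO avg aggr", "LRO no desc"
};

/* Hypothetical stand-in for priv->lro.lro_mgr.stats */
struct lro_counters {
        uint64_t aggregated;
        uint64_t flushed;
        uint64_t no_desc;
};

/* Mirrors ipoib_get_ethtool_stats(): one u64 per string, same order. */
static void fill_stats(const struct lro_counters *c, uint64_t *data)
{
        int index = 0;

        data[index++] = c->aggregated;
        data[index++] = c->flushed;
        /* avoid dividing by zero when nothing has been flushed yet */
        data[index++] = c->flushed ? c->aggregated / c->flushed : 0;
        data[index++] = c->no_desc;
}

int main(void)
{
        struct lro_counters c = { .aggregated = 1200, .flushed = 300, .no_desc = 2 };
        uint64_t data[sizeof(stats_keys) / sizeof(stats_keys[0])];
        size_t i;

        fill_stats(&c, data);
        for (i = 0; i < sizeof(stats_keys) / sizeof(stats_keys[0]); i++)
                printf("%-16s %llu\n", stats_keys[i], (unsigned long long)data[i]);
        return 0;
}

ethtool -S reports the values in exactly this order, which is why get_strings() and get_ethtool_stats() must agree on the index of every key.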
index 8b882bb..961c585 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_fs.c 1389 2004-12-27 22:56:47Z roland $
  */
 
 #include <linux/err.h>
index f429bce..66cafa2 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
  */
 
 #include <linux/delay.h>
@@ -290,7 +288,10 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       netif_receive_skb(skb);
+       if (dev->features & NETIF_F_LRO)
+               lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
+       else
+               netif_receive_skb(skb);
 
 repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -442,6 +443,9 @@ poll_more:
        }
 
        if (done < budget) {
+               if (dev->features & NETIF_F_LRO)
+                       lro_flush_all(&priv->lro.lro_mgr);
+
                netif_rx_complete(dev, napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
@@ -898,7 +902,8 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        return 0;
 }
 
-static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
+static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+                               enum ipoib_flush_level level)
 {
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
@@ -911,7 +916,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
-               __ipoib_ib_dev_flush(cpriv, pkey_event);
+               __ipoib_ib_dev_flush(cpriv, level);
 
        mutex_unlock(&priv->vlan_mutex);
 
@@ -925,7 +930,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
                return;
        }
 
-       if (pkey_event) {
+       if (level == IPOIB_FLUSH_HEAVY) {
                if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
                        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                        ipoib_ib_dev_down(dev, 0);
@@ -943,11 +948,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
                priv->pkey_index = new_index;
        }
 
-       ipoib_dbg(priv, "flushing\n");
+       if (level == IPOIB_FLUSH_LIGHT) {
+               ipoib_mark_paths_invalid(dev);
+               ipoib_mcast_dev_flush(dev);
+       }
 
-       ipoib_ib_dev_down(dev, 0);
+       if (level >= IPOIB_FLUSH_NORMAL)
+               ipoib_ib_dev_down(dev, 0);
 
-       if (pkey_event) {
+       if (level == IPOIB_FLUSH_HEAVY) {
                ipoib_ib_dev_stop(dev, 0);
                ipoib_ib_dev_open(dev);
        }
@@ -957,27 +966,34 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
-               ipoib_ib_dev_up(dev);
+               if (level >= IPOIB_FLUSH_NORMAL)
+                       ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
 }
 
-void ipoib_ib_dev_flush(struct work_struct *work)
+void ipoib_ib_dev_flush_light(struct work_struct *work)
+{
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, flush_light);
+
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+}
+
+void ipoib_ib_dev_flush_normal(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv =
-               container_of(work, struct ipoib_dev_priv, flush_task);
+               container_of(work, struct ipoib_dev_priv, flush_normal);
 
-       ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
-       __ipoib_ib_dev_flush(priv, 0);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
 }
 
-void ipoib_pkey_event(struct work_struct *work)
+void ipoib_ib_dev_flush_heavy(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv =
-               container_of(work, struct ipoib_dev_priv, pkey_event_task);
+               container_of(work, struct ipoib_dev_priv, flush_heavy);
 
-       ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
-       __ipoib_ib_dev_flush(priv, 1);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
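
The ipoib_ib.c changes above replace the single pkey_event flag with a three-step flush level: LIGHT only invalidates cached paths and flushes multicast state, NORMAL additionally brings the IB side of the interface down and back up, and HEAVY also stops and reopens the QP. The sketch below compresses __ipoib_ib_dev_flush() down to that dispatch; the enum is assumed to be declared in ipoib.h (not shown in this excerpt) with LIGHT < NORMAL < HEAVY, which is the only property the level comparisons rely on, and the admin-up and P_Key re-lookup checks of the real function are stripped out.

/*
 * Sketch of the tiered flush introduced above.  Helper names are taken
 * from the diff; the event annotations come from the new ipoib_event()
 * dispatch in ipoib_verbs.c further down.
 */
enum ipoib_flush_level {
        IPOIB_FLUSH_LIGHT,      /* SM change, client reregister */
        IPOIB_FLUSH_NORMAL,     /* port error, port active, LID change */
        IPOIB_FLUSH_HEAVY,      /* P_Key change: restart the QP */
};

static void example_flush(struct net_device *dev, enum ipoib_flush_level level)
{
        if (level == IPOIB_FLUSH_LIGHT) {
                ipoib_mark_paths_invalid(dev);  /* new helper in ipoib_main.c */
                ipoib_mcast_dev_flush(dev);
        }
        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev, 0);
        if (level == IPOIB_FLUSH_HEAVY) {
                ipoib_ib_dev_stop(dev, 0);
                ipoib_ib_dev_open(dev);
        }
        /* the real code also re-checks IPOIB_FLAG_ADMIN_UP before this */
        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_up(dev);
}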
index 2442090..8be9ea0 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
  */
 
 #include "ipoib.h"
@@ -62,6 +60,15 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
+static int lro;
+module_param(lro, bool, 0444);
+MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
+
+static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
+module_param(lro_max_aggr, int, 0644);
+MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
+               "(default = 64)");
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -350,6 +357,23 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 
 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
 
+void ipoib_mark_paths_invalid(struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_path *path, *tp;
+
+       spin_lock_irq(&priv->lock);
+
+       list_for_each_entry_safe(path, tp, &priv->path_list, list) {
+               ipoib_dbg(priv, "mark path LID 0x%04x GID " IPOIB_GID_FMT " invalid\n",
+                       be16_to_cpu(path->pathrec.dlid),
+                       IPOIB_GID_ARG(path->pathrec.dgid));
+               path->valid =  0;
+       }
+
+       spin_unlock_irq(&priv->lock);
+}
+
 void ipoib_flush_paths(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -386,6 +410,7 @@ static void path_rec_completion(int status,
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
+       struct ipoib_ah *old_ah;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
@@ -409,6 +434,7 @@ static void path_rec_completion(int status,
 
        spin_lock_irqsave(&priv->lock, flags);
 
+       old_ah   = path->ah;
        path->ah = ah;
 
        if (ah) {
@@ -421,6 +447,17 @@ static void path_rec_completion(int status,
                        __skb_queue_tail(&skqueue, skb);
 
                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
+                       if (neigh->ah) {
+                               WARN_ON(neigh->ah != old_ah);
+                               /*
+                                * Dropping the ah reference inside
+                                * priv->lock is safe here, because we
+                                * will hold one more reference from
+                                * the original value of path->ah (ie
+                                * old_ah).
+                                */
+                               ipoib_put_ah(neigh->ah);
+                       }
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
@@ -443,6 +480,7 @@ static void path_rec_completion(int status,
                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
+               path->valid = 1;
        }
 
        path->query = NULL;
@@ -450,6 +488,9 @@ static void path_rec_completion(int status,
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
+       if (old_ah)
+               ipoib_put_ah(old_ah);
+
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
@@ -623,8 +664,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
        spin_lock(&priv->lock);
 
        path = __path_find(dev, phdr->hwaddr + 4);
-       if (!path) {
-               path = path_rec_create(dev, phdr->hwaddr + 4);
+       if (!path || !path->valid) {
+               if (!path)
+                       path = path_rec_create(dev, phdr->hwaddr + 4);
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
@@ -938,6 +980,54 @@ static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
 };
 
+static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
+                      void **tcph, u64 *hdr_flags, void *priv)
+{
+       unsigned int ip_len;
+       struct iphdr *iph;
+
+       if (unlikely(skb->protocol != htons(ETH_P_IP)))
+               return -1;
+
+       /*
+        * In the future we may add an else clause that verifies the
+        * checksum and allows devices which do not calculate checksum
+        * to use LRO.
+        */
+       if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
+               return -1;
+
+       /* Check for non-TCP packet */
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
+       if (iph->protocol != IPPROTO_TCP)
+               return -1;
+
+       ip_len = ip_hdrlen(skb);
+       skb_set_transport_header(skb, ip_len);
+       *tcph = tcp_hdr(skb);
+
+       /* check if IP header and TCP header are complete */
+       if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+               return -1;
+
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       *iphdr = iph;
+
+       return 0;
+}
+
+static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
+{
+       priv->lro.lro_mgr.max_aggr       = lro_max_aggr;
+       priv->lro.lro_mgr.max_desc       = IPOIB_MAX_LRO_DESCRIPTORS;
+       priv->lro.lro_mgr.lro_arr        = priv->lro.lro_desc;
+       priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
+       priv->lro.lro_mgr.features       = LRO_F_NAPI;
+       priv->lro.lro_mgr.dev            = priv->dev;
+       priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+}
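
get_skb_hdr() above is the header-inspection callback handed to the inet_lro manager: it only admits IPv4 TCP packets whose checksum was already verified by the HCA (CHECKSUM_UNNECESSARY) into an LRO session, and it rejects anything whose IP total length cannot cover the IP header plus the TCP header. The same length check can be exercised outside the kernel; the block below is a standalone illustration with hand-rolled header parsing, not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration of the length check done by get_skb_hdr():
 * the IP total length must cover both the IP header and the TCP header
 * before the packet may join an LRO session.
 */
static int headers_complete(const uint8_t *pkt, size_t caplen)
{
        unsigned int ip_len, tcp_len, tot_len;

        if (caplen < 20 || (pkt[0] >> 4) != 4)
                return 0;                       /* not IPv4 */
        ip_len = (pkt[0] & 0x0f) * 4;           /* IHL in 32-bit words */
        if (pkt[9] != 6)                        /* protocol field: 6 == TCP */
                return 0;
        if (caplen < ip_len + 20)
                return 0;
        tcp_len = (pkt[ip_len + 12] >> 4) * 4;  /* TCP data offset */
        tot_len = (pkt[2] << 8) | pkt[3];       /* big-endian on the wire */

        /* same condition as "ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)" */
        return tot_len >= ip_len + tcp_len;
}

int main(void)
{
        /* 20-byte IPv4 header + 20-byte TCP header, tot_len = 40 */
        uint8_t pkt[40] = { 0 };

        pkt[0] = 0x45;           /* version 4, IHL 5 (20 bytes) */
        pkt[2] = 0; pkt[3] = 40; /* tot_len = 40 */
        pkt[9] = 6;              /* IPPROTO_TCP */
        pkt[20 + 12] = 0x50;     /* TCP data offset = 5 (20 bytes) */

        printf("headers complete: %d\n", headers_complete(pkt, sizeof(pkt)));
        return 0;
}

In the driver, only packets that pass these checks are merged; everything else falls back to netif_receive_skb(), as shown in the ipoib_ib_handle_rx_wc() hunk earlier in this file.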
+
 static void ipoib_setup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -977,10 +1067,11 @@ static void ipoib_setup(struct net_device *dev)
 
        priv->dev = dev;
 
+       ipoib_lro_setup(priv);
+
        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);
 
-       mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);
 
        INIT_LIST_HEAD(&priv->path_list);
@@ -989,9 +1080,10 @@ static void ipoib_setup(struct net_device *dev)
        INIT_LIST_HEAD(&priv->multicast_list);
 
        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
-       INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
-       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+       INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
+       INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
+       INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
@@ -1154,6 +1246,9 @@ static struct net_device *ipoib_add_port(const char *format,
                priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        }
 
+       if (lro)
+               priv->dev->features |= NETIF_F_LRO;
+
        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
@@ -1304,6 +1399,12 @@ static int __init ipoib_init_module(void)
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
 #endif
 
+       /*
+        * When copying small received packets, we only copy from the
+        * linear data part of the SKB, so we rely on this condition.
+        */
+       BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
+
        ret = ipoib_register_debugfs();
        if (ret)
                return ret;
index 3f663fb..1fcc9a8 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
  */
 
 #include <linux/skbuff.h>
@@ -188,6 +186,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah;
        int ret;
+       int set_qkey = 0;
 
        mcast->mcmember = *mcmember;
 
@@ -202,6 +201,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
+               set_qkey = 1;
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -214,7 +214,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                }
 
                ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
-                                        &mcast->mcmember.mgid);
+                                        &mcast->mcmember.mgid, set_qkey);
                if (ret < 0) {
                        ipoib_warn(priv, "couldn't attach QP to multicast group "
                                   IPOIB_GID_FMT "\n",
@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
 
        priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
 
-       if (!ipoib_cm_admin_enabled(dev))
-               dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+       if (!ipoib_cm_admin_enabled(dev)) {
+               rtnl_lock();
+               dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+               rtnl_unlock();
+       }
 
        ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
 
@@ -594,10 +597,6 @@ int ipoib_mcast_start_thread(struct net_device *dev)
                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
-       spin_lock_irq(&priv->lock);
-       set_bit(IPOIB_MCAST_STARTED, &priv->flags);
-       spin_unlock_irq(&priv->lock);
-
        return 0;
 }
 
@@ -607,10 +606,6 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 
        ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-       spin_lock_irq(&priv->lock);
-       clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
-       spin_unlock_irq(&priv->lock);
-
        mutex_lock(&mcast_mutex);
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
@@ -635,10 +630,10 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
                                IPOIB_GID_ARG(mcast->mcmember.mgid));
 
                /* Remove ourselves from the multicast group */
-               ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
-                                        &mcast->mcmember.mgid);
+               ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
+                                     be16_to_cpu(mcast->mcmember.mlid));
                if (ret)
-                       ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
+                       ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
        }
 
        return 0;
index 8766d29..6832511 100644 (file)
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include "ipoib.h"
 
-int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
+int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ib_qp_attr *qp_attr;
+       struct ib_qp_attr *qp_attr = NULL;
        int ret;
        u16 pkey_index;
 
-       ret = -ENOMEM;
-       qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
-       if (!qp_attr)
-               goto out;
-
        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                ret = -ENXIO;
@@ -54,18 +47,23 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
        }
        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 
-       /* set correct QKey for QP */
-       qp_attr->qkey = priv->qkey;
-       ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
-       if (ret) {
-               ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
-               goto out;
+       if (set_qkey) {
+               ret = -ENOMEM;
+               qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
+               if (!qp_attr)
+                       goto out;
+
+               /* set correct QKey for QP */
+               qp_attr->qkey = priv->qkey;
+               ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
+               if (ret) {
+                       ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
+                       goto out;
+               }
        }
 
        /* attach QP to multicast group */
-       mutex_lock(&priv->mcast_mutex);
        ret = ib_attach_mcast(priv->qp, mgid, mlid);
-       mutex_unlock(&priv->mcast_mutex);
        if (ret)
                ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
 
@@ -74,20 +72,6 @@ out:
        return ret;
 }
 
-int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
-{
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
-       int ret;
-
-       mutex_lock(&priv->mcast_mutex);
-       ret = ib_detach_mcast(priv->qp, mgid, mlid);
-       mutex_unlock(&priv->mcast_mutex);
-       if (ret)
-               ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
-
-       return ret;
-}
-
 int ipoib_init_qp(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -201,7 +185,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        init_attr.recv_cq = priv->recv_cq;
 
        if (priv->hca_caps & IB_DEVICE_UD_TSO)
-               init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
+               init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
+
+       if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
+               init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
 
        if (dev->features & NETIF_F_SG)
                init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
@@ -289,15 +276,17 @@ void ipoib_event(struct ib_event_handler *handler,
        if (record->element.port_num != priv->port)
                return;
 
-       if (record->event == IB_EVENT_PORT_ERR    ||
-           record->event == IB_EVENT_PORT_ACTIVE ||
-           record->event == IB_EVENT_LID_CHANGE  ||
-           record->event == IB_EVENT_SM_CHANGE   ||
+       ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
+                 record->device->name, record->element.port_num);
+
+       if (record->event == IB_EVENT_SM_CHANGE ||
            record->event == IB_EVENT_CLIENT_REREGISTER) {
-               ipoib_dbg(priv, "Port state change event\n");
-               queue_work(ipoib_workqueue, &priv->flush_task);
+               queue_work(ipoib_workqueue, &priv->flush_light);
+       } else if (record->event == IB_EVENT_PORT_ERR ||
+                  record->event == IB_EVENT_PORT_ACTIVE ||
+                  record->event == IB_EVENT_LID_CHANGE) {
+               queue_work(ipoib_workqueue, &priv->flush_normal);
        } else if (record->event == IB_EVENT_PKEY_CHANGE) {
-               ipoib_dbg(priv, "P_Key change event on port:%d\n", priv->port);
-               queue_work(ipoib_workqueue, &priv->pkey_event_task);
+               queue_work(ipoib_workqueue, &priv->flush_heavy);
        }
 }
index 1cdb5cf..b08eb56 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $
  */
 
 #include <linux/module.h>
index aeb58ca..356fac6 100644 (file)
@@ -42,9 +42,6 @@
  *     Zhenyu Wang
  * Modified by:
  *      Erez Zilber
- *
- *
- * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
  */
 
 #include <linux/types.h>
index a8c1b30..0e10703 100644 (file)
@@ -36,8 +36,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
  */
 #ifndef __ISCSI_ISER_H__
 #define __ISCSI_ISER_H__
index 08dc81c..31ad498 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
index cac50c4..81e49cb 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
index d19cfe6..77cabee 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
index 4351457..ed7c5f7 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
  */
 
 #include <linux/module.h>
@@ -49,8 +47,6 @@
 #include <scsi/srp.h>
 #include <scsi/scsi_transport_srp.h>
 
-#include <rdma/ib_cache.h>
-
 #include "ib_srp.h"
 
 #define DRV_NAME       "ib_srp"
@@ -183,10 +179,10 @@ static int srp_init_qp(struct srp_target_port *target,
        if (!attr)
                return -ENOMEM;
 
-       ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
-                                 target->srp_host->port,
-                                 be16_to_cpu(target->path.pkey),
-                                 &attr->pkey_index);
+       ret = ib_find_pkey(target->srp_host->srp_dev->dev,
+                          target->srp_host->port,
+                          be16_to_cpu(target->path.pkey),
+                          &attr->pkey_index);
        if (ret)
                goto out;
 
@@ -1883,8 +1879,7 @@ static ssize_t srp_create_target(struct device *dev,
        if (ret)
                goto err;
 
-       ib_get_cached_gid(host->srp_dev->dev, host->port, 0,
-                         &target->path.sgid);
+       ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);
 
        shost_printk(KERN_DEBUG, target->scsi_host, PFX
                     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
index 63d2ae7..e185b90 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_srp.h 3932 2005-11-01 17:19:29Z roland $
  */
 
 #ifndef IB_SRP_H
index 6c4f320..ed0ecd9 100644 (file)
@@ -54,6 +54,7 @@ enum {
        RDMA_CQ_DISABLE         = 16,
        RDMA_CTRL_QP_SETUP      = 17,
        RDMA_GET_MEM            = 18,
+       RDMA_GET_MIB            = 19,
 
        GET_RX_PAGE_INFO        = 50,
 };
index ff9c013..cf26968 100644 (file)
@@ -303,6 +303,12 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
+       case RDMA_GET_MIB: {
+               spin_lock(&adapter->stats_lock);
+               t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
+               spin_unlock(&adapter->stats_lock);
+               break;
+       }
        default:
                ret = -EOPNOTSUPP;
        }
@@ -381,6 +387,7 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
        case RDMA_CQ_DISABLE:
        case RDMA_CTRL_QP_SETUP:
        case RDMA_GET_MEM:
+       case RDMA_GET_MIB:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_rdma_ctl(adapter, req, data);
index a0177fc..29db711 100644 (file)
@@ -38,7 +38,7 @@
 #define DRV_VERSION "1.0-ko"
 
 /* Firmware version */
-#define FW_VERSION_MAJOR 6
+#define FW_VERSION_MAJOR 7
 #define FW_VERSION_MINOR 0
 #define FW_VERSION_MICRO 0
 #endif                         /* __CHELSIO_VERSION_H */
index d82f275..2b5006b 100644 (file)
@@ -101,6 +101,34 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
                        mlx4_dbg(dev, "    %s\n", fname[i]);
 }
 
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *inbox;
+       int err = 0;
+
+#define MOD_STAT_CFG_IN_SIZE           0x100
+
+#define MOD_STAT_CFG_PG_SZ_M_OFFSET    0x002
+#define MOD_STAT_CFG_PG_SZ_OFFSET      0x003
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
+
+       MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
+       MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
+
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
+                       MLX4_CMD_TIME_CLASS_A);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
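
mlx4_MOD_STAT_CFG() above follows the usual mlx4 firmware-command shape: allocate a command mailbox, zero a fixed-size input buffer, poke the few interesting bytes in at their documented offsets with MLX4_PUT(), and fire the command. Here log_pg_sz lands at byte 0x003 and the log_pg_sz_m modify flag at byte 0x002; the mlx4_init_hca() hunk further down calls it with log_pg_sz = 0 and log_pg_sz_m = 1 before QUERY_DEV_CAP and treats failure as only a warning. A standalone sketch of the byte layout, with a simplified one-byte helper standing in for MLX4_PUT():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MOD_STAT_CFG_IN_SIZE            0x100
#define MOD_STAT_CFG_PG_SZ_M_OFFSET     0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET       0x003

/* Simplified stand-in for MLX4_PUT() when the value is a single byte. */
static void put_u8(uint8_t *inbox, uint8_t val, size_t offset)
{
        inbox[offset] = val;
}

int main(void)
{
        /* Stands in for mailbox->buf: the zeroed command input buffer. */
        uint8_t inbox[MOD_STAT_CFG_IN_SIZE];

        memset(inbox, 0, sizeof(inbox));
        put_u8(inbox, 0, MOD_STAT_CFG_PG_SZ_OFFSET);    /* cfg->log_pg_sz   */
        put_u8(inbox, 1, MOD_STAT_CFG_PG_SZ_M_OFFSET);  /* cfg->log_pg_sz_m */

        printf("byte 0x002 = %u, byte 0x003 = %u\n",
               (unsigned)inbox[2], (unsigned)inbox[3]);
        return 0;
}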
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
        struct mlx4_cmd_mailbox *mailbox;
index 306cb9b..a0e046c 100644 (file)
 #include "mlx4.h"
 #include "icm.h"
 
+struct mlx4_mod_stat_cfg {
+       u8 log_pg_sz;
+       u8 log_pg_sz_m;
+};
+
 struct mlx4_dev_cap {
        int max_srq_sz;
        int max_qp_sz;
@@ -162,5 +167,6 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
 int mlx4_NOP(struct mlx4_dev *dev);
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
 
 #endif /* MLX4_FW_H */
index a6aa49f..d373601 100644 (file)
@@ -485,6 +485,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        struct mlx4_priv          *priv = mlx4_priv(dev);
        struct mlx4_adapter        adapter;
        struct mlx4_dev_cap        dev_cap;
+       struct mlx4_mod_stat_cfg   mlx4_cfg;
        struct mlx4_profile        profile;
        struct mlx4_init_hca_param init_hca;
        u64 icm_size;
@@ -502,6 +503,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                return err;
        }
 
+       mlx4_cfg.log_pg_sz_m = 1;
+       mlx4_cfg.log_pg_sz = 0;
+       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+       if (err)
+               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
index 57f7f1f..b4b5787 100644 (file)
@@ -38,6 +38,9 @@
 
 #include "mlx4.h"
 
+#define MGM_QPN_MASK       0x00FFFFFF
+#define MGM_BLCK_LB_BIT    30
+
 struct mlx4_mgm {
        __be32                  next_gid_index;
        __be32                  members_count;
@@ -153,7 +156,8 @@ static int find_mgm(struct mlx4_dev *dev,
        return err;
 }
 
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         int block_mcast_loopback)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
@@ -202,13 +206,18 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
        }
 
        for (i = 0; i < members_count; ++i)
-               if (mgm->qp[i] == cpu_to_be32(qp->qpn)) {
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
                        mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
                        err = 0;
                        goto out;
                }
 
-       mgm->qp[members_count++] = cpu_to_be32(qp->qpn);
+       if (block_mcast_loopback)
+               mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
+                                                      (1 << MGM_BLCK_LB_BIT));
+       else
+               mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
+
        mgm->members_count       = cpu_to_be32(members_count);
 
        err = mlx4_WRITE_MCG(dev, index, mailbox);
@@ -283,7 +292,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 
        members_count = be32_to_cpu(mgm->members_count);
        for (loc = -1, i = 0; i < members_count; ++i)
-               if (mgm->qp[i] == cpu_to_be32(qp->qpn))
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
                        loc = i;
 
        if (loc == -1) {
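
The mcg.c hunks above are the low-level half of the new IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK flag (requested by IPoIB in the ipoib_verbs.c hunk earlier in this diff): each 32-bit entry in a multicast group member list carries the QPN in its low 24 bits (MGM_QPN_MASK) and the "block local loopback" request in bit 30 (MGM_BLCK_LB_BIT), so membership comparisons in both attach and detach must mask the QPN out first. A standalone sketch of the packing and the masked comparison:

#include <stdint.h>
#include <stdio.h>

#define MGM_QPN_MASK    0x00FFFFFF
#define MGM_BLCK_LB_BIT 30

/* Pack one member entry the way mlx4_multicast_attach() does (host order;
 * the driver additionally converts with cpu_to_be32()). */
static uint32_t mgm_entry(uint32_t qpn, int block_mcast_loopback)
{
        uint32_t entry = qpn & MGM_QPN_MASK;

        if (block_mcast_loopback)
                entry |= 1u << MGM_BLCK_LB_BIT;
        return entry;
}

/* The membership test now ignores the flag bit, as in the detach path. */
static int same_qp(uint32_t entry, uint32_t qpn)
{
        return (entry & MGM_QPN_MASK) == (qpn & MGM_QPN_MASK);
}

int main(void)
{
        uint32_t e = mgm_entry(0x123456, 1);

        printf("entry = 0x%08x, matches QP 0x123456: %d\n", e, same_qp(e, 0x123456));
        return 0;
}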
index 809a526..e4f2fb3 100644 (file)
@@ -34,8 +34,6 @@ fw-shipped-$(CONFIG_SND_SB16_CSP) += sb16/mulaw_main.csp sb16/alaw_main.csp \
                                     sb16/ima_adpcm_capture.csp
 fw-shipped-$(CONFIG_SND_YMFPCI) += yamaha/ds1_ctrl.fw yamaha/ds1_dsp.fw \
                                   yamaha/ds1e_ctrl.fw
-fw-shipped-$(CONFIG_TIGON3) += tigon/tg3.bin tigon/tg3_tso.bin \
-                              tigon/tg3_tso5.bin
 fw-shipped-$(CONFIG_USB_DABUSB) += dabusb/firmware.fw dabusb/bitstream.bin
 fw-shipped-$(CONFIG_USB_EMI26) += emi26/loader.fw emi26/firmware.fw \
                                  emi26/bitstream.fw
index 0bfe250..fd4a6a0 100644 (file)
 
 /* Due to the structure of pre-exisiting code, don't use assembler line
    comment character # to ignore the arguments. Instead, use a dummy macro. */
-.macro ignore a=0, b=0, c=0, d=0
+.macro __cfi_ignore a=0, b=0, c=0, d=0
 .endm
 
-#define CFI_STARTPROC  ignore
-#define CFI_ENDPROC    ignore
-#define CFI_DEF_CFA    ignore
-#define CFI_DEF_CFA_REGISTER   ignore
-#define CFI_DEF_CFA_OFFSET     ignore
-#define CFI_ADJUST_CFA_OFFSET  ignore
-#define CFI_OFFSET     ignore
-#define CFI_REL_OFFSET ignore
-#define CFI_REGISTER   ignore
-#define CFI_RESTORE    ignore
-#define CFI_REMEMBER_STATE ignore
-#define CFI_RESTORE_STATE ignore
-#define CFI_UNDEFINED ignore
-#define CFI_SIGNAL_FRAME ignore
+#define CFI_STARTPROC  __cfi_ignore
+#define CFI_ENDPROC    __cfi_ignore
+#define CFI_DEF_CFA    __cfi_ignore
+#define CFI_DEF_CFA_REGISTER   __cfi_ignore
+#define CFI_DEF_CFA_OFFSET     __cfi_ignore
+#define CFI_ADJUST_CFA_OFFSET  __cfi_ignore
+#define CFI_OFFSET     __cfi_ignore
+#define CFI_REL_OFFSET __cfi_ignore
+#define CFI_REGISTER   __cfi_ignore
+#define CFI_RESTORE    __cfi_ignore
+#define CFI_REMEMBER_STATE __cfi_ignore
+#define CFI_RESTORE_STATE __cfi_ignore
+#define CFI_UNDEFINED __cfi_ignore
+#define CFI_SIGNAL_FRAME __cfi_ignore
 
 #endif
 
index a744383..81b3dd5 100644 (file)
@@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         int block_mcast_loopback);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
 
 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
index c36750f..483057b 100644 (file)
@@ -2,29 +2,33 @@
  * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  *
- * This Software is licensed under one of the following licenses:
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * 1) under the terms of the "Common Public License 1.0" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/cpl.php.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
  *
- * 2) under the terms of the "The BSD License" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/bsd-license.php.
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
  *
- * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
- *    copy of which is available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/gpl-license.php.
- *
- * Licensee has the right to choose one of the above licenses.
- *
- * Redistributions of source code must retain the above copyright
- * notice and one of the license notices.
- *
- * Redistributions in binary form must reproduce both the above copyright
- * notice, one of the license notices in the documentation
- * and/or other materials provided with the distribution.
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #if !defined(IB_ADDR_H)
@@ -57,6 +61,7 @@ struct rdma_dev_addr {
        unsigned char dst_dev_addr[MAX_ADDR_LEN];
        unsigned char broadcast[MAX_ADDR_LEN];
        enum rdma_node_type dev_type;
+       struct net_device *src_dev;
 };
 
 /**
index f179d23..00a2b8e 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef _IB_CACHE_H
index a627c86..ec7c6d9 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_cm.h 4311 2005-12-05 18:42:01Z sean.hefty $
  */
 #if !defined(IB_CM_H)
 #define IB_CM_H
index 00dadbf..f62b842 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $
  */
 
 #if !defined(IB_FMR_POOL_H)
@@ -61,7 +59,7 @@ struct ib_fmr_pool_param {
        int                     pool_size;
        int                     dirty_watermark;
        void                  (*flush_function)(struct ib_fmr_pool *pool,
-                                               void *              arg);
+                                               void               *arg);
        void                   *flush_arg;
        unsigned                cache:1;
 };
index 7228c05..5f6c40f 100644 (file)
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_mad.h 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 
-#if !defined( IB_MAD_H )
+#if !defined(IB_MAD_H)
 #define IB_MAD_H
 
 #include <linux/list.h>
@@ -194,8 +192,7 @@ struct ib_vendor_mad {
        u8                      data[IB_MGMT_VENDOR_DATA];
 };
 
-struct ib_class_port_info
-{
+struct ib_class_port_info {
        u8                      base_version;
        u8                      class_version;
        __be16                  capability_mask;
@@ -614,11 +611,11 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
  * any class specific header, and MAD data area.
  * If @rmpp_active is set, the RMPP header will be initialized for sending.
  */
-struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
-                                           u32 remote_qpn, u16 pkey_index,
-                                           int rmpp_active,
-                                           int hdr_len, int data_len,
-                                           gfp_t gfp_mask);
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+                                          u32 remote_qpn, u16 pkey_index,
+                                          int rmpp_active,
+                                          int hdr_len, int data_len,
+                                          gfp_t gfp_mask);
 
 /**
  * ib_is_mad_class_rmpp - returns whether given management class
index f926020..d7fc45c 100644 (file)
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #ifndef IB_PACK_H
index 942692b..3841c1a 100644 (file)
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $
  */
 
 #ifndef IB_SA_H
index f29af13..aaca087 100644 (file)
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $
  */
 
-#if !defined( IB_SMI_H )
+#if !defined(IB_SMI_H)
 #define IB_SMI_H
 
 #include <rdma/ib_mad.h>
index 37650af..bd3d380 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_user_cm.h 4019 2005-11-11 00:33:09Z sean.hefty $
  */
 
 #ifndef IB_USER_CM_H
index 29d2c72..d6fce1c 100644 (file)
@@ -29,8 +29,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $
  */
 
 #ifndef IB_USER_MAD_H
index 8d65bf0..a17f771 100644 (file)
@@ -31,8 +31,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_user_verbs.h 4019 2005-11-11 00:33:09Z sean.hefty $
  */
 
 #ifndef IB_USER_VERBS_H
@@ -291,7 +289,10 @@ struct ib_uverbs_wc {
        __u32 opcode;
        __u32 vendor_err;
        __u32 byte_len;
-       __u32 imm_data;
+       union {
+               __u32 imm_data;
+               __u32 invalidate_rkey;
+       } ex;
        __u32 qp_num;
        __u32 src_qp;
        __u32 wc_flags;
index 31d30b1..90b529f 100644 (file)
@@ -34,8 +34,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #if !defined(IB_VERBS_H)
@@ -93,7 +91,7 @@ enum ib_device_cap_flags {
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
-       IB_DEVICE_ZERO_STAG             = (1<<15),
+       IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
        IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
@@ -105,6 +103,8 @@ enum ib_device_cap_flags {
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
        IB_DEVICE_UD_TSO                = (1<<19),
+       IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
+       IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 };
 
 enum ib_atomic_cap {
@@ -150,6 +150,7 @@ struct ib_device_attr {
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
+       unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
 };
@@ -226,6 +227,57 @@ static inline int ib_width_enum_to_int(enum ib_port_width width)
        }
 }
 
+struct ib_protocol_stats {
+       /* TBD... */
+};
+
+struct iw_protocol_stats {
+       u64     ipInReceives;
+       u64     ipInHdrErrors;
+       u64     ipInTooBigErrors;
+       u64     ipInNoRoutes;
+       u64     ipInAddrErrors;
+       u64     ipInUnknownProtos;
+       u64     ipInTruncatedPkts;
+       u64     ipInDiscards;
+       u64     ipInDelivers;
+       u64     ipOutForwDatagrams;
+       u64     ipOutRequests;
+       u64     ipOutDiscards;
+       u64     ipOutNoRoutes;
+       u64     ipReasmTimeout;
+       u64     ipReasmReqds;
+       u64     ipReasmOKs;
+       u64     ipReasmFails;
+       u64     ipFragOKs;
+       u64     ipFragFails;
+       u64     ipFragCreates;
+       u64     ipInMcastPkts;
+       u64     ipOutMcastPkts;
+       u64     ipInBcastPkts;
+       u64     ipOutBcastPkts;
+
+       u64     tcpRtoAlgorithm;
+       u64     tcpRtoMin;
+       u64     tcpRtoMax;
+       u64     tcpMaxConn;
+       u64     tcpActiveOpens;
+       u64     tcpPassiveOpens;
+       u64     tcpAttemptFails;
+       u64     tcpEstabResets;
+       u64     tcpCurrEstab;
+       u64     tcpInSegs;
+       u64     tcpOutSegs;
+       u64     tcpRetransSegs;
+       u64     tcpInErrs;
+       u64     tcpOutRsts;
+};
+
+union rdma_protocol_stats {
+       struct ib_protocol_stats        ib;
+       struct iw_protocol_stats        iw;
+};
+
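
The iw_protocol_stats / rdma_protocol_stats definitions above give iWARP devices a place to export per-device IP and TCP MIB counters; the ib_protocol_stats half is still a placeholder, and a new get_protocol_stats method is added to struct ib_device further down so low-level drivers (for example cxgb3, which gains an RDMA_GET_MIB control operation earlier in this diff) can fill the union on demand. The fragment below is a hedged sketch of what a provider-side implementation might look like; only the function-pointer signature and the union layout come from this patch, everything named "example_" is invented for illustration.

struct example_hw_mib {                 /* hypothetical hardware counter block */
        u64 ip_in_receives;
        u64 tcp_in_segs, tcp_out_segs, tcp_retrans_segs;
};

/* Hypothetical: a real driver would read the MIB from its hardware here,
 * e.g. through something like cxgb3's new RDMA_GET_MIB control op. */
int example_read_hw_mib(struct ib_device *ibdev, struct example_hw_mib *m);

static int example_get_protocol_stats(struct ib_device *ibdev,
                                      union rdma_protocol_stats *stats)
{
        struct example_hw_mib m;

        memset(stats, 0, sizeof(*stats));
        if (example_read_hw_mib(ibdev, &m))
                return -ENOSYS;

        stats->iw.ipInReceives   = m.ip_in_receives;
        stats->iw.tcpInSegs      = m.tcp_in_segs;
        stats->iw.tcpOutSegs     = m.tcp_out_segs;
        stats->iw.tcpRetransSegs = m.tcp_retrans_segs;
        return 0;
}

A provider would presumably install this by setting the new struct ib_device member (ibdev->get_protocol_stats) before registering the device, in the same way the other verbs methods are filled in.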
 struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
@@ -413,6 +465,8 @@ enum ib_wc_opcode {
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
+       IB_WC_LOCAL_INV,
+       IB_WC_FAST_REG_MR,
 /*
  * Set value of IB_WC_RECV so consumers can test if a completion is a
  * receive by testing (opcode & IB_WC_RECV).
@@ -423,7 +477,8 @@ enum ib_wc_opcode {
 
 enum ib_wc_flags {
        IB_WC_GRH               = 1,
-       IB_WC_WITH_IMM          = (1<<1)
+       IB_WC_WITH_IMM          = (1<<1),
+       IB_WC_WITH_INVALIDATE   = (1<<2),
 };
 
 struct ib_wc {
@@ -433,7 +488,10 @@ struct ib_wc {
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
-       __be32                  imm_data;
+       union {
+               __be32          imm_data;
+               u32             invalidate_rkey;
+       } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
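
With imm_data moved into the new ex union (both here and in the userspace-visible struct ib_uverbs_wc above), a completion can now carry either immediate data or, for the new memory-management extensions, an invalidated R_Key; consumers are expected to test wc_flags (IB_WC_WITH_IMM versus the new IB_WC_WITH_INVALIDATE) before touching either member. A minimal consumer-side sketch, with the debug printout purely illustrative:

/* Sketch of how a ULP polls a CQ against the reworked struct ib_wc.
 * Only the field and flag names come from this patch. */
static void example_handle_one_completion(struct ib_cq *cq)
{
        struct ib_wc wc;

        if (ib_poll_cq(cq, 1, &wc) != 1 || wc.status != IB_WC_SUCCESS)
                return;

        if (wc.wc_flags & IB_WC_WITH_IMM)
                pr_debug("recv with imm 0x%x\n", be32_to_cpu(wc.ex.imm_data));
        else if (wc.wc_flags & IB_WC_WITH_INVALIDATE)
                pr_debug("remote peer invalidated rkey 0x%x\n",
                         wc.ex.invalidate_rkey);
}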
@@ -498,7 +556,8 @@ enum ib_qp_type {
 };
 
 enum ib_qp_create_flags {
-       IB_QP_CREATE_IPOIB_UD_LSO       = 1 << 0,
+       IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
+       IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
 };
 
 struct ib_qp_init_attr {
@@ -627,6 +686,9 @@ enum ib_wr_opcode {
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
+       IB_WR_RDMA_READ_WITH_INV,
+       IB_WR_LOCAL_INV,
+       IB_WR_FAST_REG_MR,
 };
 
 enum ib_send_flags {
@@ -643,6 +705,12 @@ struct ib_sge {
        u32     lkey;
 };
 
+struct ib_fast_reg_page_list {
+       struct ib_device       *device;
+       u64                    *page_list;
+       unsigned int            max_page_list_len;
+};
+
 struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
@@ -675,6 +743,15 @@ struct ib_send_wr {
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
+               struct {
+                       u64                             iova_start;
+                       struct ib_fast_reg_page_list   *page_list;
+                       unsigned int                    page_shift;
+                       unsigned int                    page_list_len;
+                       u32                             length;
+                       int                             access_flags;
+                       u32                             rkey;
+               } fast_reg;
        } wr;
 };
 
@@ -777,7 +854,7 @@ struct ib_cq {
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
-       void *                  cq_context;
+       void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
 };
@@ -883,7 +960,7 @@ struct ib_dma_mapping_ops {
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
-                                              enum dma_data_direction dir);
+                                              enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
@@ -919,6 +996,8 @@ struct ib_device {
 
        struct iw_cm_verbs           *iwcm;
 
+       int                        (*get_protocol_stats)(struct ib_device *device,
+                                                        union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr);
        int                        (*query_port)(struct ib_device *device,
@@ -1013,6 +1092,11 @@ struct ib_device {
        int                        (*query_mr)(struct ib_mr *mr,
                                               struct ib_mr_attr *mr_attr);
        int                        (*dereg_mr)(struct ib_mr *mr);
+       struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
+                                              int max_page_list_len);
+       struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
+                                                                  int page_list_len);
+       void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int                        (*rereg_phys_mr)(struct ib_mr *mr,
                                                    int mr_rereg_mask,
                                                    struct ib_pd *pd,
@@ -1065,6 +1149,7 @@ struct ib_device {
 
        char                         node_desc[64];
        __be64                       node_guid;
+       u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
 };
@@ -1806,6 +1891,54 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
  */
 int ib_dereg_mr(struct ib_mr *mr);
 
+/**
+ * ib_alloc_fast_reg_mr - Allocates memory region usable with the
+ *   IB_WR_FAST_REG_MR send work request.
+ * @pd: The protection domain associated with the region.
+ * @max_page_list_len: requested max physical buffer list length to be
+ *   used with fast register work requests for this MR.
+ */
+struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+/**
+ * ib_alloc_fast_reg_page_list - Allocates a page list array
+ * @device - ib device pointer.
+ * @page_list_len - size of the page list array to be allocated.
+ *
+ * This allocates and returns a struct ib_fast_reg_page_list * and a
+ * page_list array that is at least page_list_len in size.  The actual
+ * size is returned in max_page_list_len.  The caller is responsible
+ * for initializing the contents of the page_list array before posting
+ * a send work request with the IB_WC_FAST_REG_MR opcode.
+ *
+ * The page_list array entries must be translated using one of the
+ * ib_dma_*() functions just like the addresses passed to
+ * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
+ * ib_fast_reg_page_list must not be modified by the caller until the
+ * IB_WC_FAST_REG_MR work request completes.
+ */
+struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
+                               struct ib_device *device, int page_list_len);
+
+/**
+ * ib_free_fast_reg_page_list - Deallocates a previously allocated
+ *   page list array.
+ * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.
+ */
+void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
+
+/**
+ * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
+ *   R_Key and L_Key.
+ * @mr - struct ib_mr pointer to be updated.
+ * @newkey - new key to be used.
+ */
+static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
+{
+       mr->lkey = (mr->lkey & 0xffffff00) | newkey;
+       mr->rkey = (mr->rkey & 0xffffff00) | newkey;
+}
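
The memory-management-extension additions above (IB_DEVICE_MEM_MGT_EXTENSIONS, the fast_reg branch of ib_send_wr, ib_alloc_fast_reg_mr(), ib_alloc_fast_reg_page_list() and ib_update_fast_reg_key()) are consumer-facing: a ULP allocates the MR and page list once, then for each registration fills the page list with DMA addresses, bumps the low key byte so a stale R_Key cannot be reused, and posts an IB_WR_FAST_REG_MR work request. The block below is only a hedged sketch of that sequence, assuming one page per entry and omitting error handling, the later IB_WR_LOCAL_INV, and completion handling.

/*
 * Sketch only: wiring together the new verbs from this patch.  dma_addrs
 * and npages are assumed to come from an earlier ib_dma_map_sg()-style
 * mapping; real users must also invalidate the MR when they are done.
 */
static int example_fast_register(struct ib_qp *qp, struct ib_mr *mr,
                                 struct ib_fast_reg_page_list *pl,
                                 u64 *dma_addrs, int npages, u32 length)
{
        struct ib_send_wr wr, *bad_wr;
        int i;

        for (i = 0; i < npages; i++)
                pl->page_list[i] = dma_addrs[i];

        /* change the low key byte so the previous R_Key becomes invalid */
        ib_update_fast_reg_key(mr, (u8)((mr->rkey & 0xff) + 1));

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IB_WR_FAST_REG_MR;
        wr.send_flags = IB_SEND_SIGNALED;
        wr.wr.fast_reg.iova_start = dma_addrs[0];
        wr.wr.fast_reg.page_list = pl;
        wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        wr.wr.fast_reg.page_list_len = npages;
        wr.wr.fast_reg.length = length;
        wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                      IB_ACCESS_REMOTE_READ |
                                      IB_ACCESS_REMOTE_WRITE;
        wr.wr.fast_reg.rkey = mr->rkey;

        return ib_post_send(qp, &wr, &bad_wr);
}

Once the corresponding IB_WC_FAST_REG_MR completion arrives, the updated mr->rkey is what the ULP would advertise to its remote peer.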
+
 /**
  * ib_alloc_mw - Allocates a memory window.
  * @pd: The protection domain associated with the memory window.
index aeefa9b..cbb822e 100644 (file)
@@ -62,7 +62,7 @@ struct iw_cm_event {
        struct sockaddr_in remote_addr;
        void *private_data;
        u8 private_data_len;
-       void* provider_data;
+       void *provider_data;
 };
 
 /**
index 010f876..22bb2e7 100644 (file)
@@ -2,29 +2,33 @@
  * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  *
- * This Software is licensed under one of the following licenses:
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * 1) under the terms of the "Common Public License 1.0" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/cpl.php.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
  *
- * 2) under the terms of the "The BSD License" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/bsd-license.php.
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
  *
- * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
- *    copy of which is available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/gpl-license.php.
- *
- * Licensee has the right to choose one of the above licenses.
- *
- * Redistributions of source code must retain the above copyright
- * notice and one of the license notices.
- *
- * Redistributions in binary form must reproduce both the above copyright
- * notice, one of the license notices in the documentation
- * and/or other materials provided with the distribution.
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #if !defined(RDMA_CM_H)
@@ -57,11 +61,11 @@ enum rdma_cm_event_type {
 };
 
 enum rdma_port_space {
-       RDMA_PS_SDP  = 0x0001,
-       RDMA_PS_IPOIB= 0x0002,
-       RDMA_PS_TCP  = 0x0106,
-       RDMA_PS_UDP  = 0x0111,
-       RDMA_PS_SCTP = 0x0183
+       RDMA_PS_SDP   = 0x0001,
+       RDMA_PS_IPOIB = 0x0002,
+       RDMA_PS_TCP   = 0x0106,
+       RDMA_PS_UDP   = 0x0111,
+       RDMA_PS_SCTP  = 0x0183
 };
 
 struct rdma_addr {
index 950424b..2389c3b 100644 (file)
@@ -1,29 +1,33 @@
 /*
  * Copyright (c) 2006 Intel Corporation.  All rights reserved.
  *
- * This Software is licensed under one of the following licenses:
- *
- * 1) under the terms of the "Common Public License 1.0" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/cpl.php.
- *
- * 2) under the terms of the "The BSD License" a copy of which is
- *    available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/bsd-license.php.
- *
- * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
- *    copy of which is available from the Open Source Initiative, see
- *    http://www.opensource.org/licenses/gpl-license.php.
- *
- * Licensee has the right to choose one of the above licenses.
- *
- * Redistributions of source code must retain the above copyright
- * notice and one of the license notices.
- *
- * Redistributions in binary form must reproduce both the above copyright
- * notice, one of the license notices in the documentation
- * and/or other materials provided with the distribution.
- *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #if !defined(RDMA_CM_IB_H)