Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394...
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bd07f02..8be9ea0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
  */
 
 #include "ipoib.h"
@@ -62,6 +60,15 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
+static int lro;
+module_param(lro, bool, 0444);
+MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
+
+static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
+module_param(lro_max_aggr, int, 0644);
+MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
+               "(default = 64)");
+
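
A quick usage note for the two parameters above (ib_ipoib is the upstream IPoIB module name; the values are illustrative):

	/*
	 * Load-time example:
	 *
	 *   modprobe ib_ipoib lro=1 lro_max_aggr=32
	 *
	 * lro (perms 0444) is fixed once the module is loaded, while
	 * lro_max_aggr (perms 0644) can also be tuned at runtime via
	 * /sys/module/ib_ipoib/parameters/lro_max_aggr.
	 */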
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -195,7 +202,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
                return 0;
        }
 
-       if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+       if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
                return -EINVAL;
 
        priv->admin_mtu = new_mtu;
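
For reference, the new cap derives from the port's IB MTU instead of the old fixed packet size; the helper added to ipoib.h in this series is essentially (hedged reconstruction):

	/* UD MTU leaves room for the 4-byte IPoIB encapsulation header
	 * within the IB MTU. */
	#define IPOIB_UD_MTU(ib_mtu)	(ib_mtu - IPOIB_ENCAP_LEN)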
@@ -350,6 +357,23 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 
 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
 
+void ipoib_mark_paths_invalid(struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_path *path, *tp;
+
+       spin_lock_irq(&priv->lock);
+
+       list_for_each_entry_safe(path, tp, &priv->path_list, list) {
+               ipoib_dbg(priv, "mark path LID 0x%04x GID " IPOIB_GID_FMT " invalid\n",
+                       be16_to_cpu(path->pathrec.dlid),
+                       IPOIB_GID_ARG(path->pathrec.dgid));
+               path->valid = 0;
+       }
+
+       spin_unlock_irq(&priv->lock);
+}
+
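A sketch of the intended caller (assumed to live in the heavy-flush path in ipoib_ib.c in this series): cached paths are invalidated rather than torn down, so traffic keeps flowing on the old address handles until fresh path records arrive:

	/* Hedged sketch of the flush handler's use of the new helper: */
	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_mark_paths_invalid(dev);
		ipoib_mcast_dev_flush(dev);
	}
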
 void ipoib_flush_paths(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -386,6 +410,7 @@ static void path_rec_completion(int status,
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
+       struct ipoib_ah *old_ah;
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
@@ -409,6 +434,7 @@ static void path_rec_completion(int status,
 
        spin_lock_irqsave(&priv->lock, flags);
 
+       old_ah   = path->ah;
        path->ah = ah;
 
        if (ah) {
@@ -421,6 +447,17 @@ static void path_rec_completion(int status,
                        __skb_queue_tail(&skqueue, skb);
 
                list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
+                       if (neigh->ah) {
+                               WARN_ON(neigh->ah != old_ah);
+                               /*
+                                * Dropping the ah reference inside
+                                * priv->lock is safe here, because we
+                                * will hold one more reference from
+                                * the original value of path->ah (i.e.
+                                * old_ah).
+                                */
+                               ipoib_put_ah(neigh->ah);
+                       }
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;
                        memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
@@ -443,6 +480,7 @@ static void path_rec_completion(int status,
                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
+               path->valid = 1;
        }
 
        path->query = NULL;
@@ -450,6 +488,9 @@ static void path_rec_completion(int status,
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
+       if (old_ah)
+               ipoib_put_ah(old_ah);
+
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
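
Why dropping the reference under priv->lock is safe, concretely: per ipoib.h of this era (hedged reconstruction), ipoib_put_ah() is a plain kref_put(), and the release callback acquires priv->lock itself, so it must never fire with the lock held; holding old_ah guarantees the count stays above zero:

	/* Reconstructed from ipoib.h / ipoib_ib.c: ipoib_free_ah() takes
	 * priv->lock to move the ah onto the dead_ahs reap list.  Since
	 * old_ah still pins the refcount, the kref_put() done under
	 * priv->lock above can never reach zero and recurse into the
	 * lock. */
	static inline void ipoib_put_ah(struct ipoib_ah *ah)
	{
		kref_put(&ah->ref, ipoib_free_ah);
	}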
@@ -623,8 +664,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
        spin_lock(&priv->lock);
 
        path = __path_find(dev, phdr->hwaddr + 4);
-       if (!path) {
-               path = path_rec_create(dev, phdr->hwaddr + 4);
+       if (!path || !path->valid) {
+               if (!path)
+                       path = path_rec_create(dev, phdr->hwaddr + 4);
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
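
The intent of the reworked check above (hedged, from the shape of this change): a path that exists but is marked invalid is re-queried through the existing path_rec_start() machinery further down in this function, with the skb parked on path->queue; path_rec_completion() later swaps in the fresh ah, releases the queue, and drops the old handle.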
@@ -938,6 +980,54 @@ static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
 };
 
+static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
+                      void **tcph, u64 *hdr_flags, void *priv)
+{
+       unsigned int ip_len;
+       struct iphdr *iph;
+
+       if (unlikely(skb->protocol != htons(ETH_P_IP)))
+               return -1;
+
+       /*
+        * In the future we may add an else clause that verifies the
+        * checksum and allows devices which do not calculate checksum
+        * to use LRO.
+        */
+       if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
+               return -1;
+
+       /* Check for non-TCP packet */
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
+       if (iph->protocol != IPPROTO_TCP)
+               return -1;
+
+       ip_len = ip_hdrlen(skb);
+       skb_set_transport_header(skb, ip_len);
+       *tcph = tcp_hdr(skb);
+
+       /* check if IP header and TCP header are complete */
+       if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+               return -1;
+
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       *iphdr = iph;
+
+       return 0;
+}
+
+static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
+{
+       priv->lro.lro_mgr.max_aggr       = lro_max_aggr;
+       priv->lro.lro_mgr.max_desc       = IPOIB_MAX_LRO_DESCRIPTORS;
+       priv->lro.lro_mgr.lro_arr        = priv->lro.lro_desc;
+       priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
+       priv->lro.lro_mgr.features       = LRO_F_NAPI;
+       priv->lro.lro_mgr.dev            = priv->dev;
+       priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+}
+
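How this manager gets fed (assumed; the actual hookup lives in the receive path in ipoib_ib.c in this series): skbs go through inet_lro during NAPI polling, and any partial aggregates are flushed before the poll completes:

	/* Hedged sketch of the receive-path integration: */
	if (dev->features & NETIF_F_LRO)
		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
	else
		netif_receive_skb(skb);

	/* ...and at the end of the NAPI poll, before re-enabling
	 * completion interrupts: */
	if (dev->features & NETIF_F_LRO)
		lro_flush_all(&priv->lro.lro_mgr);
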
 static void ipoib_setup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -971,20 +1061,17 @@ static void ipoib_setup(struct net_device *dev)
                                    NETIF_F_LLTX                |
                                    NETIF_F_HIGHDMA);
 
-       /* MTU will be reset when mcast join happens */
-       dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-       priv->mcast_mtu          = priv->admin_mtu = dev->mtu;
-
        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
 
        netif_carrier_off(dev);
 
        priv->dev = dev;
 
+       ipoib_lro_setup(priv);
+
        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);
 
-       mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);
 
        INIT_LIST_HEAD(&priv->path_list);
@@ -993,9 +1080,10 @@ static void ipoib_setup(struct net_device *dev)
        INIT_LIST_HEAD(&priv->multicast_list);
 
        INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
-       INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
        INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
-       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+       INIT_WORK(&priv->flush_light,  ipoib_ib_dev_flush_light);
+       INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
+       INIT_WORK(&priv->flush_heavy,  ipoib_ib_dev_flush_heavy);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
        INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
@@ -1107,6 +1195,7 @@ static struct net_device *ipoib_add_port(const char *format,
 {
        struct ipoib_dev_priv *priv;
        struct ib_device_attr *device_attr;
+       struct ib_port_attr attr;
        int result = -ENOMEM;
 
        priv = ipoib_intf_alloc(format);
@@ -1115,6 +1204,18 @@ static struct net_device *ipoib_add_port(const char *format,
 
        SET_NETDEV_DEV(priv->dev, hca->dma_device);
 
+       if (!ib_query_port(hca, port, &attr)) {
+               priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+       } else {
+               printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+                      hca->name, port);
+               goto device_init_failed;
+       }
+
+       /* MTU will be reset when mcast join happens */
+       priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
+       priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+
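
For reference, ib_mtu_enum_to_int() (from rdma/ib_verbs.h) maps the IB_MTU_* enum to a byte count and returns -1 for anything unrecognized:

	static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
	{
		switch (mtu) {
		case IB_MTU_256:  return  256;
		case IB_MTU_512:  return  512;
		case IB_MTU_1024: return 1024;
		case IB_MTU_2048: return 2048;
		case IB_MTU_4096: return 4096;
		default:	  return -1;
		}
	}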
        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
@@ -1145,6 +1246,9 @@ static struct net_device *ipoib_add_port(const char *format,
                priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
        }
 
+       if (lro)
+               priv->dev->features |= NETIF_F_LRO;
+
        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
@@ -1289,11 +1393,18 @@ static int __init ipoib_init_module(void)
 
        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
-       ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
+       ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
+                                                    IPOIB_MIN_QUEUE_SIZE));
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
 #endif
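
A worked example of the clamping above (constants assumed from this series' ipoib.h: MAX_SEND_CQE = 16, IPOIB_MIN_QUEUE_SIZE = 2, IPOIB_MAX_QUEUE_SIZE = 8192):

	/*
	 * send_queue_size=10 -> roundup_pow_of_two(10)  = 16
	 *                    -> min(16, 8192)           = 16
	 *                    -> max(16, max(2 * 16, 2)) = 32
	 *
	 * i.e. the send queue is never smaller than twice the send-CQ
	 * polling batch, so draining the CQ always frees enough
	 * descriptors for the send path to make progress.
	 */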
 
+       /*
+        * When copying small received packets, we only copy from the
+        * linear data part of the SKB, so we rely on this condition.
+        */
+       BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
+
        ret = ipoib_register_debugfs();
        if (ret)
                return ret;