IPoIB: Use netif_tx_lock() and get rid of private tx_lock, LLTX
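The driver's private, IRQ-disabling tx_lock is replaced by the per-device TX
lock that the networking core already provides: with LLTX gone, the core
serializes hard_start_xmit() itself, other contexts take
netif_tx_lock()/netif_tx_lock_bh() around shared TX state, and priv->lock
does its own IRQ save/restore where needed.  A minimal sketch of the non-LLTX
contract this relies on (illustrative only; the real core code does this per
TX queue inside dev_queue_xmit()):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/*
	 * Sketch: without NETIF_F_LLTX, the core takes the device TX lock
	 * before calling into the driver, so the driver no longer needs a
	 * TX lock of its own.
	 */
	static int example_xmit_one(struct net_device *dev, struct sk_buff *skb)
	{
		int rc;

		netif_tx_lock(dev);
		rc = dev->hard_start_xmit(skb, dev);
		netif_tx_unlock(dev);

		return rc;
	}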
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1fcc9a8..d9d1223 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tmp;
-       unsigned long flags;
        int tx_dropped = 0;
 
        ipoib_dbg_mcast(netdev_priv(dev),
                        "deleting multicast group " IPOIB_GID_FMT "\n",
                        IPOIB_GID_ARG(mcast->mcmember.mgid));
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irq(&priv->lock);
 
        list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
                /*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
                ipoib_neigh_free(dev, neigh);
        }
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irq(&priv->lock);
 
        if (mcast->ah)
                ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
                dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
        }
 
-       spin_lock_irqsave(&priv->tx_lock, flags);
+       netif_tx_lock_bh(dev);
        dev->stats.tx_dropped += tx_dropped;
-       spin_unlock_irqrestore(&priv->tx_lock, flags);
+       netif_tx_unlock_bh(dev);
 
        kfree(mcast);
 }
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
        }
 
        /* actually send any queued packets */
-       spin_lock_irq(&priv->tx_lock);
+       netif_tx_lock_bh(dev);
        while (!skb_queue_empty(&mcast->pkt_queue)) {
                struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-               spin_unlock_irq(&priv->tx_lock);
+               netif_tx_unlock_bh(dev);
 
                skb->dev = dev;
 
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
-               spin_lock_irq(&priv->tx_lock);
+               netif_tx_lock_bh(dev);
        }
-       spin_unlock_irq(&priv->tx_lock);
+       netif_tx_unlock_bh(dev);
 
        return 0;
 }
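The requeue loop above drops the TX lock around dev_queue_xmit(): now that the
driver is not LLTX, dev_queue_xmit() ends up taking the device TX lock itself
before calling back into the driver, so transmitting with the lock held would
deadlock.  A stripped-down sketch of the same unlock/transmit/relock pattern,
with a hypothetical helper name standing in for the IPoIB specifics:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/*
	 * Hypothetical helper: push every skb on a backlog queue back
	 * through the stack, dropping the TX lock around each transmit.
	 */
	static void example_drain_queue(struct net_device *dev,
					struct sk_buff_head *queue)
	{
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(queue)) {
			struct sk_buff *skb = skb_dequeue(queue);

			netif_tx_unlock_bh(dev);

			skb->dev = dev;
			if (dev_queue_xmit(skb))
				printk(KERN_WARNING "failed to requeue packet\n");

			netif_tx_lock_bh(dev);
		}
		netif_tx_unlock_bh(dev);
	}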
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
 {
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
 
        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
                                        IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
                /* Flush out any queued packets */
-               spin_lock_irq(&priv->tx_lock);
+               netif_tx_lock_bh(dev);
                while (!skb_queue_empty(&mcast->pkt_queue)) {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
-               spin_unlock_irq(&priv->tx_lock);
+               netif_tx_unlock_bh(dev);
 
                /* Clear the busy flag so we try again */
                status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -366,6 +364,21 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        return ret;
 }
 
+void ipoib_mcast_carrier_on_task(struct work_struct *work)
+{
+       struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+                                                  carrier_on_task);
+
+       /*
+        * Take rtnl_lock to avoid racing with ipoib_stop() and
+        * turning the carrier back on while a device is being
+        * removed.
+        */
+       rtnl_lock();
+       netif_carrier_on(priv->dev);
+       rtnl_unlock();
+}
+
 static int ipoib_mcast_join_complete(int status,
                                     struct ib_sa_multicast *multicast)
 {
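The container_of() in ipoib_mcast_carrier_on_task() above implies a matching
carrier_on_task member in struct ipoib_dev_priv and an INIT_WORK() at device
setup time; those hunks live in ipoib.h and (presumably) ipoib_main.c rather
than in this file.  A trimmed sketch of that wiring, with a stand-in private
struct so the fragment is self-contained:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	/* Defined earlier in ipoib_multicast.c; the prototype is assumed
	 * to be exported via ipoib.h. */
	void ipoib_mcast_carrier_on_task(struct work_struct *work);

	/* Stand-in for the real struct ipoib_dev_priv, which gains the
	 * carrier_on_task work item. */
	struct example_priv {
		struct net_device *dev;
		struct work_struct carrier_on_task;
	};

	/* At device init, alongside the driver's other INIT_WORK() calls. */
	static void example_init_work(struct example_priv *priv)
	{
		INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	}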
@@ -392,8 +405,12 @@ static int ipoib_mcast_join_complete(int status,
                                           &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);
 
+               /*
+                * Defer carrier on work to ipoib_workqueue to avoid a
+                * deadlock on rtnl_lock here.
+                */
                if (mcast == priv->broadcast)
-                       netif_carrier_on(dev);
+                       queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
                return 0;
        }
@@ -643,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;
+       unsigned long flags;
 
-       /*
-        * We can only be called from ipoib_start_xmit, so we're
-        * inside tx_lock -- no need to save/restore flags.
-        */
-       spin_lock(&priv->lock);
+       spin_lock_irqsave(&priv->lock, flags);
 
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)         ||
            !priv->broadcast                                    ||
@@ -719,7 +733,7 @@ out:
        }
 
 unlock:
-       spin_unlock(&priv->lock);
+       spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 void ipoib_mcast_dev_flush(struct net_device *dev)
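The comment removed from ipoib_mcast_send() relied on the old contract:
ipoib_start_xmit() ran under the driver's IRQ-disabling tx_lock, so a bare
spin_lock() on priv->lock was sufficient there.  Under netif_tx_lock() the
xmit path runs with interrupts enabled (only bottom halves are off), so
helpers called from it must save and restore IRQ state themselves, which is
what the hunks above switch to.  A minimal before/after sketch of that
contract, using placeholder names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static unsigned long example_shared;

	/* Old contract (LLTX): the caller already disabled IRQs via its
	 * own tx_lock, so a plain spin_lock() was enough. */
	static void example_helper_lltx(void)
	{
		spin_lock(&example_lock);
		example_shared++;
		spin_unlock(&example_lock);
	}

	/* New contract (netif_tx_lock): IRQs may be enabled, so the helper
	 * does its own save/restore. */
	static void example_helper(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		example_shared++;
		spin_unlock_irqrestore(&example_lock, flags);
	}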
@@ -769,7 +783,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        ipoib_mcast_stop_thread(dev, 0);
 
        local_irq_save(flags);
-       netif_tx_lock(dev);
+       netif_addr_lock(dev);
        spin_lock(&priv->lock);
 
        /*
@@ -846,7 +860,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        }
 
        spin_unlock(&priv->lock);
-       netif_tx_unlock(dev);
+       netif_addr_unlock(dev);
        local_irq_restore(flags);
 
        /* We have to cancel outside of the spinlock */
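This last hunk is about the device address lists rather than the TX path: the
restart task re-reads the device multicast list, and that list is protected by
the address-list lock, so netif_addr_lock() is the lock to nest inside
local_irq_save() here once the TX lock belongs to the core.  A minimal sketch
of reading the list under that lock, assuming the dev_mc_list representation
of this kernel generation:

	#include <linux/netdevice.h>

	/* Count dev->mc_list entries under the address-list lock. */
	static int example_count_mcast(struct net_device *dev)
	{
		struct dev_mc_list *mclist;
		int n = 0;

		netif_addr_lock_bh(dev);
		for (mclist = dev->mc_list; mclist; mclist = mclist->next)
			n++;
		netif_addr_unlock_bh(dev);

		return n;
	}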