netpoll: revert 6bdb7fe3104 and fix be_poll() instead
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e4..e4ba3e7 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
         MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev)
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       budget = poll_one_napi(dev->npinfo, napi, budget);
+                       budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+                                              napi, budget);
                        spin_unlock(&napi->poll_lock);
 
                        if (!budget)
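
Note: poll_napi() now loads dev->npinfo through rcu_dereference_bh() rather
than a plain pointer read, matching the RCU protection this series puts
around npinfo. A minimal sketch of the assumed access pattern (the
example_npinfo_reader() name is hypothetical, not part of the patch):
readers run in BH context, the writer publishes with rcu_assign_pointer()
under RTNL and frees via call_rcu_bh().

    #include <linux/netdevice.h>
    #include <linux/netpoll.h>

    static void example_npinfo_reader(struct net_device *dev)
    {
            struct netpoll_info *ni;

            rcu_read_lock_bh();                     /* readers run with BH disabled */
            ni = rcu_dereference_bh(dev->npinfo);   /* paired with rcu_assign_pointer() */
            if (ni)
                    pr_info("%s: %u packets queued\n",
                            dev->name, skb_queue_len(&ni->txq));
            rcu_read_unlock_bh();
    }
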
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi)
                struct sk_buff *skb;
 
                while ((skb = skb_dequeue(&npi->arp_tx)))
-                       arp_reply(skb);
+                       netpoll_arp_reply(skb, npi);
        }
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
        const struct net_device_ops *ops;
+       struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
        if (!dev || !netif_running(dev))
                return;
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev)
        poll_napi(dev);
 
        if (dev->flags & IFF_SLAVE) {
-               if (dev->npinfo) {
+               if (ni) {
                        struct net_device *bond_dev = dev->master;
                        struct sk_buff *skb;
-                       while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                       struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+                       while ((skb = skb_dequeue(&ni->arp_tx))) {
                                skb->dev = bond_dev;
-                               skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                               skb_queue_tail(&bond_ni->arp_tx, skb);
                        }
                }
        }
 
-       service_arp_queue(dev->npinfo);
+       service_arp_queue(ni);
 
        zap_completion_queue();
 }
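
Note: netpoll_poll_dev() now resolves npinfo once into the local ni and
hands it down, so service_arp_queue()/netpoll_arp_reply() never re-read
skb->dev->npinfo; that matters when skb->dev is a bonding slave whose own
npinfo may be NULL. A simplified sketch of how a companion rx hook is
expected to resolve npinfo once and pass it to the new two-argument
__netpoll_rx() (example_rx_hook() is a hypothetical name):

    #include <linux/netpoll.h>

    static int example_rx_hook(struct sk_buff *skb)
    {
            struct netpoll_info *npinfo;
            int consumed = 0;

            rcu_read_lock_bh();
            npinfo = rcu_dereference_bh(skb->dev->npinfo);
            if (npinfo)
                    consumed = __netpoll_rx(skb, npinfo);   /* new two-arg form */
            rcu_read_unlock_bh();
            return consumed;
    }
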
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev)
        return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev)
 {
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        unsigned long tries;
        const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
-       struct netpoll_info *npinfo = np->dev->npinfo;
+       struct netpoll_info *npinfo;
+
+       WARN_ON_ONCE(!irqs_disabled());
 
+       npinfo = rcu_dereference_bh(np->dev->npinfo);
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
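
Note: with local_irq_save()/restore() dropped from the function body (next
hunk), disabling interrupts becomes the caller's responsibility, enforced
by the WARN_ON_ONCE() above. This mirrors the netpoll_send_skb() wrapper
in include/linux/netpoll.h; treat the sketch below as illustrative of the
contract rather than the exact header change:

    #include <linux/netpoll.h>

    static void example_netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
    {
            unsigned long flags;

            local_irq_save(flags);          /* satisfy the new caller contract */
            netpoll_send_skb_on_dev(np, skb, np->dev);
            local_irq_restore(flags);
    }
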
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
-               unsigned long flags;
 
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-               local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_xmit_stopped(txq)) {
+                                       if (vlan_tx_tag_present(skb) &&
+                                           !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+                                               skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+                                               if (unlikely(!skb))
+                                                       break;
+                                               skb->vlan_tci = 0;
+                                       }
+
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                }
 
                WARN_ONCE(!irqs_disabled(),
-                       "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+                       "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
                        dev->name, ops->ndo_start_xmit);
 
-               local_irq_restore(flags);
        }
 
        if (status != NETDEV_TX_OK) {
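
Note: the new block above inserts the VLAN tag in software when the device
cannot do it in hardware, using the 3.x-era API where __vlan_put_tag()
takes no protocol argument and consumes the skb on failure. The same
fallback, isolated as a standalone sketch (example_soft_vlan_insert() is a
hypothetical helper):

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    static struct sk_buff *example_soft_vlan_insert(struct sk_buff *skb)
    {
            if (vlan_tx_tag_present(skb) &&
                !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
                    skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
                    if (unlikely(!skb))
                            return NULL;    /* __vlan_put_tag() freed the skb */
                    skb->vlan_tci = 0;      /* tag now lives in the packet data */
            }
            return skb;
    }
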
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-       struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb)
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 }
 
-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
        int proto, len, ulen;
        int hits = 0;
        const struct iphdr *iph;
        struct udphdr *uh;
-       struct netpoll_info *npinfo = skb->dev->npinfo;
        struct netpoll *np, *tmp;
 
        if (list_empty(&npinfo->rx_np))
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb)
                return 1;
        }
 
+       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+               skb = vlan_untag(skb);
+               if (unlikely(!skb))
+                       goto out;
+       }
+
        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
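
Note: vlan_untag() may reallocate or free the skb, so only its return
value is valid afterwards; NULL means the packet was dropped. The contract
relied on above, as a standalone sketch (hypothetical helper name):

    #include <linux/if_vlan.h>

    static struct sk_buff *example_pop_8021q(struct sk_buff *skb)
    {
            if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
                    skb = vlan_untag(skb);  /* strips the 802.1Q header */
                    if (unlikely(!skb))
                            return NULL;    /* dropped, e.g. allocation failure */
            }
            return skb;
    }
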
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
        }
 
        if (!ndev->npinfo) {
-               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+               npinfo = kmalloc(sizeof(*npinfo), gfp);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
-                       err = ops->ndo_netpoll_setup(ndev, npinfo);
+                       err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
                        if (err)
                                goto free_npinfo;
                }
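
Note: __netpoll_setup() and ndo_netpoll_setup() now take a gfp_t so that
stacked drivers which attach netpoll to a slave in atomic context can
avoid sleeping allocations. A hypothetical caller (name assumed; bonding
is the motivating user in this series):

    #include <linux/netpoll.h>

    static int example_enslave_netpoll(struct netpoll *np,
                                       struct net_device *slave_dev)
    {
            /* runs under a spinlock, so GFP_KERNEL would be illegal here */
            return __netpoll_setup(np, slave_dev, GFP_ATOMIC);
    }
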
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np)
        refill_skbs();
 
        rtnl_lock();
-       err = __netpoll_setup(np, ndev);
+       err = __netpoll_setup(np, ndev, GFP_KERNEL);
        rtnl_unlock();
 
        if (err)
@@ -878,6 +895,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+       struct netpoll_info *npinfo =
+                       container_of(rcu_head, struct netpoll_info, rcu);
+
+       skb_queue_purge(&npinfo->arp_tx);
+       skb_queue_purge(&npinfo->txq);
+
+       /* we can't call cancel_delayed_work_sync here, as we are in softirq */
+       cancel_delayed_work(&npinfo->tx_work);
+
+       /* clean after last, unfinished work */
+       __skb_queue_purge(&npinfo->txq);
+       /* now cancel it again */
+       cancel_delayed_work(&npinfo->tx_work);
+       kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;
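
Note: rcu_cleanup_netpoll_info() runs from softirq context, so it uses the
non-blocking cancel_delayed_work() and purges the tx queue a second time
to catch skbs queued by a tx_work instance that was still in flight. The
underlying deferred-free idiom, reduced to a generic sketch (example_*
names are hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_obj {
            struct rcu_head rcu;
            /* ... payload ... */
    };

    static void example_free_cb(struct rcu_head *head)
    {
            struct example_obj *obj =
                            container_of(head, struct example_obj, rcu);

            kfree(obj);     /* runs after all rcu_read_lock_bh() readers finish */
    }

    static void example_defer_free(struct example_obj *obj)
    {
            call_rcu_bh(&obj->rcu, example_free_cb);        /* never blocks */
    }
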
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np)
                        ops->ndo_netpoll_cleanup(np->dev);
 
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
+               call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+       }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-               /* avoid racing with NAPI reading npinfo */
-               synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+       struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
 
-               skb_queue_purge(&npinfo->arp_tx);
-               skb_queue_purge(&npinfo->txq);
-               cancel_delayed_work_sync(&npinfo->tx_work);
+       __netpoll_cleanup(np);
+       kfree(np);
+}
 
-               /* clean after last, unfinished work */
-               __skb_queue_purge(&npinfo->txq);
-               kfree(npinfo);
-       }
+void __netpoll_free_rcu(struct netpoll *np)
+{
+       call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
 }
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
 
 void netpoll_cleanup(struct netpoll *np)
 {
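
Note: __netpoll_free_rcu() replaces the synchronize_rcu_bh() wait removed
above, letting teardown paths that must not block defer both cleanup and
kfree() to an RCU-bh callback. A hypothetical user releasing a slave's
netpoll state:

    #include <linux/netpoll.h>

    static void example_release_slave_netpoll(struct netpoll *np)
    {
            if (np)
                    __netpoll_free_rcu(np); /* cleanup + kfree deferred past readers */
    }
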