rps: add __rcu annotations
diff --git a/net/core/dev.c b/net/core/dev.c
index 1bfd96b..e8a8dc1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
 #include <linux/jhash.h>
 #include <linux/random.h>
 #include <trace/events/napi.h>
+#include <trace/events/net.h>
+#include <trace/events/skb.h>
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
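The two new headers bring in the tracepoint definitions used throughout this patch: trace_net_dev_queue(), trace_net_dev_xmit(), trace_netif_rx() and trace_netif_receive_skb() from <trace/events/net.h>, and trace_kfree_skb() from <trace/events/skb.h>. A sketch of how the resulting events surface at runtime, assuming tracefs is reachable at the usual debugfs path:

    /* Illustrative only -- enable and read the new events:
     *
     *   echo 1 > /sys/kernel/debug/tracing/events/net/enable
     *   echo 1 > /sys/kernel/debug/tracing/events/skb/kfree_skb/enable
     *   cat /sys/kernel/debug/tracing/trace_pipe
     */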
 
@@ -1484,7 +1486,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        nf_reset(skb);
 
        if (unlikely(!(dev->flags & IFF_UP) ||
-                    (skb->len > (dev->mtu + dev->hard_header_len)))) {
+                    (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
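The forwarding bound grows by VLAN_HLEN (4 bytes) so that a frame still carrying an in-band 802.1Q tag is not dropped when forwarded between devices that share the same MTU. A minimal sketch of the relaxed check; skb_fits_dev() is a hypothetical name, not a kernel helper:

    /* Hypothetical helper mirroring the check above: a tagged frame is
     * VLAN_HLEN bytes longer on the wire than an untagged one, and that
     * overhead must not be charged against the device MTU. */
    static bool skb_fits_dev(const struct sk_buff *skb,
                             const struct net_device *dev)
    {
            return skb->len <= dev->mtu + dev->hard_header_len + VLAN_HLEN;
    }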
@@ -2042,6 +2044,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                }
 
                rc = ops->ndo_start_xmit(skb, dev);
+               trace_net_dev_xmit(skb, rc);
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                return rc;
@@ -2062,6 +2065,7 @@ gso:
                        skb_dst_drop(nskb);
 
                rc = ops->ndo_start_xmit(nskb, dev);
+               trace_net_dev_xmit(nskb, rc);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
                                goto out_kfree_gso_skb;
@@ -2209,7 +2213,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 }
 
 static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
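Raising RECURSION_LIMIT from 3 to 10 leaves room for deeper stacks of virtual devices (VLAN on top of bonding on top of ...), each of which re-enters the transmit path. The per-CPU counter brackets the direct-transmit call roughly as follows (simplified from dev_queue_xmit(); error handling elided):

    if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
            goto recursion_alert;           /* device stack looped too deep */

    __this_cpu_inc(xmit_recursion);
    rc = dev_hard_start_xmit(skb, dev, txq);
    __this_cpu_dec(xmit_recursion);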
 
 /**
  *     dev_queue_xmit - transmit a buffer
@@ -2254,6 +2258,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
 #endif
+       trace_net_dev_queue(skb);
        if (q->enqueue) {
                rc = __dev_xmit_skb(skb, q, dev, txq);
                goto out;
@@ -2408,7 +2413,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
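With the __rcu annotation, sparse insists that every access to rps_sock_flow_table goes through the RCU accessors. A sketch of the writer side under that discipline; sock_flow_mutex is an assumed writer lock, and the shape is only loosely modeled on the sysctl handler that resizes this table:

    static DEFINE_MUTEX(sock_flow_mutex);          /* assumed writer lock */

    static void install_sock_flow_table(struct rps_sock_flow_table *new)
    {
            struct rps_sock_flow_table *old;

            old = rcu_dereference_protected(rps_sock_flow_table,
                                            lockdep_is_held(&sock_flow_mutex));
            rcu_assign_pointer(rps_sock_flow_table, new);
            if (old) {
                    synchronize_rcu();      /* wait out readers of 'old' */
                    vfree(old);
            }
    }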
 
 /*
@@ -2420,7 +2425,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct rps_dev_flow **rflowp)
 {
        struct netdev_rx_queue *rxqueue;
-       struct rps_map *map = NULL;
+       struct rps_map *map;
        struct rps_dev_flow_table *flow_table;
        struct rps_sock_flow_table *sock_flow_table;
        int cpu = -1;
@@ -2439,15 +2444,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        } else
                rxqueue = dev->_rx;
 
-       if (rxqueue->rps_map) {
-               map = rcu_dereference(rxqueue->rps_map);
-               if (map && map->len == 1) {
+       map = rcu_dereference(rxqueue->rps_map);
+       if (map) {
+               if (map->len == 1) {
                        tcpu = map->cpus[0];
                        if (cpu_online(tcpu))
                                cpu = tcpu;
                        goto done;
                }
-       } else if (!rxqueue->rps_flow_table) {
+       } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
                goto done;
        }
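The reader side now loads rps_map exactly once through rcu_dereference() instead of peeking at the raw pointer first, which drops a redundant load and closes the window where the pointer could change between test and dereference. For rps_flow_table, rcu_dereference_raw() suffices because the value is only compared against NULL, never dereferenced:

    /* Reader pattern after the change (runs under rcu_read_lock()):
     *
     *   map = rcu_dereference(rxqueue->rps_map);   // one ordered load
     *   if (map) { ... use map ... }
     *   else if (!rcu_dereference_raw(rxqueue->rps_flow_table))
     *           goto done;                         // NULL test only
     */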
 
@@ -2610,6 +2615,7 @@ int netif_rx(struct sk_buff *skb)
        if (netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
+       trace_netif_rx(skb);
 #ifdef CONFIG_RPS
        {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -2669,6 +2675,7 @@ static void net_tx_action(struct softirq_action *h)
                        clist = clist->next;
 
                        WARN_ON(atomic_read(&skb->users));
+                       trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
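The second argument to trace_kfree_skb() is a location cookie; here it records the address of net_tx_action so each free from the completion queue can be attributed to this drop site. Userspace tools such as dropwatch resolve that address back to a symbol:

    /* Illustrative event output (field values vary):
     *
     *   kfree_skb: skbaddr=ffff8800... protocol=2048 location=ffffffff81...
     *
     * where 'location' is &net_tx_action as passed above. */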
@@ -2789,33 +2796,6 @@ out:
 }
 #endif
 
-/*
- *     netif_nit_deliver - deliver received packets to network taps
- *     @skb: buffer
- *
- *     This function is used to deliver incoming packets to network
- *     taps. It should be used when the normal netif_receive_skb path
- *     is bypassed, for example because of VLAN acceleration.
- */
-void netif_nit_deliver(struct sk_buff *skb)
-{
-       struct packet_type *ptype;
-
-       if (list_empty(&ptype_all))
-               return;
-
-       skb_reset_network_header(skb);
-       skb_reset_transport_header(skb);
-       skb->mac_len = skb->network_header - skb->mac_header;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (!ptype->dev || ptype->dev == skb->dev)
-                       deliver_skb(skb, ptype, skb->dev);
-       }
-       rcu_read_unlock();
-}
-
 /**
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
@@ -2925,8 +2905,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
-       if (vlan_tx_tag_present(skb))
-               vlan_hwaccel_do_receive(skb);
+       trace_netif_receive_skb(skb);
 
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
@@ -2940,8 +2919,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
         * be delivered to pkt handlers that are exact matches.  Also
         * the deliver_no_wcard flag will be set.  If packet handlers
         * are sensitive to duplicate packets these skbs will need to
-        * be dropped at the handler.  The vlan accel path may have
-        * already set the deliver_no_wcard flag.
+        * be dropped at the handler.
         */
        null_or_orig = NULL;
        orig_dev = skb->dev;
@@ -3000,6 +2978,18 @@ ncls:
                        goto out;
        }
 
+       if (vlan_tx_tag_present(skb)) {
+               if (pt_prev) {
+                       ret = deliver_skb(skb, pt_prev, orig_dev);
+                       pt_prev = NULL;
+               }
+               if (vlan_hwaccel_do_receive(&skb)) {
+                       ret = __netif_receive_skb(skb);
+                       goto out;
+               } else if (unlikely(!skb))
+                       goto out;
+       }
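Hardware-accelerated VLAN handling is folded into the normal receive path here, replacing the early vlan_hwaccel_do_receive(skb) call and the netif_nit_deliver() bypass removed above: any pending exact-match handler is flushed first, then the tag is resolved in line. The block depends on the new by-reference contract of the helper, sketched here from how this hunk uses it:

    /* bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
     *
     *   true                 - *skbp was retargeted at the matching VLAN
     *                          device; loop it through __netif_receive_skb().
     *   false, *skbp != NULL - no VLAN device claimed the tag; continue
     *                          delivering on the underlying device.
     *   false, *skbp == NULL - the skb was consumed or dropped; stop. */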
+
        /*
         * Make sure frames received on VLAN interfaces stacked on
         * bonding interfaces still make their way to any base bonding
@@ -3264,6 +3254,7 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
                unsigned long diffs;
 
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+               diffs |= p->vlan_tci ^ skb->vlan_tci;
                diffs |= compare_ether_header(skb_mac_header(p),
                                              skb_gro_mac_header(skb));
                NAPI_GRO_CB(p)->same_flow = !diffs;
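Folding vlan_tci into the comparison keeps GRO from merging packets whose hardware-stripped VLAN tags differ even though their Ethernet headers compare equal:

    /* 'diffs' is an XOR accumulator: it stays zero only when the new skb
     * and the held GRO packet agree on the device, the VLAN tag and the
     * Ethernet header -- i.e. they belong to the same flow. */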
@@ -3319,14 +3310,14 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
-void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
        __skb_pull(skb, skb_headlen(skb));
        skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+       skb->vlan_tci = 0;
 
        napi->skb = skb;
 }
-EXPORT_SYMBOL(napi_reuse_skb);
 
 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
@@ -5425,7 +5416,7 @@ void netdev_run_todo(void)
                /* paranoia */
                BUG_ON(netdev_refcnt_read(dev));
                WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-               WARN_ON(dev->ip6_ptr);
+               WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
                WARN_ON(dev->dn_ptr);
 
                if (dev->destructor)
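dev->ip6_ptr is read through rcu_dereference_raw() because the device is fully unregistered at this point (its refcount was already checked above), so no RCU readers can remain; the raw accessor exists purely to satisfy sparse now that the field carries __rcu. To have sparse verify the annotations added by this patch (assuming a tree that offers the CONFIG_SPARSE_RCU_POINTER option):

    /* Illustrative only:
     *
     *   make C=2 net/core/dev.o
     *
     * With CONFIG_SPARSE_RCU_POINTER=y, plain loads of rps_sock_flow_table,
     * rxqueue->rps_map or dev->ip6_ptr now warn with
     * "incompatible types in comparison expression". */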