net: allocate tx queues in register_netdevice
[pandora-kernel.git] net/core/dev.c
index 660dd41..4c3ac53 100644
 #include <linux/random.h>
 #include <trace/events/napi.h>
 #include <linux/pci.h>
+#include <linux/inetdevice.h>
 
 #include "net-sysfs.h"
 
@@ -371,6 +372,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
  *                                                     --ANK (980803)
  */
 
+static inline struct list_head *ptype_head(const struct packet_type *pt)
+{
+       if (pt->type == htons(ETH_P_ALL))
+               return &ptype_all;
+       else
+               return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+}
+
 /**
  *     dev_add_pack - add packet handler
  *     @pt: packet type declaration
@@ -386,16 +395,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 
 void dev_add_pack(struct packet_type *pt)
 {
-       int hash;
+       struct list_head *head = ptype_head(pt);
 
-       spin_lock_bh(&ptype_lock);
-       if (pt->type == htons(ETH_P_ALL))
-               list_add_rcu(&pt->list, &ptype_all);
-       else {
-               hash = ntohs(pt->type) & PTYPE_HASH_MASK;
-               list_add_rcu(&pt->list, &ptype_base[hash]);
-       }
-       spin_unlock_bh(&ptype_lock);
+       spin_lock(&ptype_lock);
+       list_add_rcu(&pt->list, head);
+       spin_unlock(&ptype_lock);
 }
 EXPORT_SYMBOL(dev_add_pack);
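A minimal usage sketch (hypothetical handler and names, not part of this patch): a protocol module fills in a struct packet_type and registers it; ptype_head() above files ETH_P_ALL handlers on ptype_all and everything else into the ptype_base hash.

	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
	{
		/* inspect or consume the packet */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	static struct packet_type my_ptype __read_mostly = {
		.type	= cpu_to_be16(ETH_P_ALL),	/* or a specific ethertype */
		.func	= my_rcv,
	};

	static int __init my_init(void)
	{
		dev_add_pack(&my_ptype);	/* hashed via ptype_head() */
		return 0;
	}

	static void __exit my_exit(void)
	{
		dev_remove_pack(&my_ptype);	/* also waits for RCU readers */
	}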
 
@@ -414,15 +418,10 @@ EXPORT_SYMBOL(dev_add_pack);
  */
 void __dev_remove_pack(struct packet_type *pt)
 {
-       struct list_head *head;
+       struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;
 
-       spin_lock_bh(&ptype_lock);
-
-       if (pt->type == htons(ETH_P_ALL))
-               head = &ptype_all;
-       else
-               head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+       spin_lock(&ptype_lock);
 
        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
@@ -433,7 +432,7 @@ void __dev_remove_pack(struct packet_type *pt)
 
        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
 out:
-       spin_unlock_bh(&ptype_lock);
+       spin_unlock(&ptype_lock);
 }
 EXPORT_SYMBOL(__dev_remove_pack);
 
@@ -1484,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        skb_orphan(skb);
        nf_reset(skb);
 
-       if (!(dev->flags & IFF_UP) ||
-           (skb->len > (dev->mtu + dev->hard_header_len))) {
+       if (unlikely(!(dev->flags & IFF_UP) ||
+                    (skb->len > (dev->mtu + dev->hard_header_len)))) {
+               atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -1553,21 +1553,56 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
  */
-void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
-       unsigned int real_num = dev->real_num_tx_queues;
+       if (txq < 1 || txq > dev->num_tx_queues)
+               return -EINVAL;
+
+       if (dev->reg_state == NETREG_REGISTERED) {
+               ASSERT_RTNL();
 
-       if (unlikely(txq > dev->num_tx_queues))
-               ;
-       else if (txq > real_num)
-               dev->real_num_tx_queues = txq;
-       else if (txq < real_num) {
-               dev->real_num_tx_queues = txq;
-               qdisc_reset_all_tx_gt(dev, txq);
+               if (txq < dev->real_num_tx_queues)
+                       qdisc_reset_all_tx_gt(dev, txq);
        }
+
+       dev->real_num_tx_queues = txq;
+       return 0;
 }
 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
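A sketch of how a driver might use the new return value (hypothetical function, assuming the device was allocated with enough num_tx_queues): once the device is registered the call must run under RTNL, and shrinking below the current count flushes stale skbs via qdisc_reset_all_tx_gt().

	static int my_resize_tx(struct net_device *dev, unsigned int txq)
	{
		int err;

		ASSERT_RTNL();		/* required after registration */
		err = netif_set_real_num_tx_queues(dev, txq);
		if (err)
			return err;	/* txq was 0 or above num_tx_queues */
		/* ... reprogram the hardware for txq queues ... */
		return 0;
	}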
 
+#ifdef CONFIG_RPS
+/**
+ *     netif_set_real_num_rx_queues - set actual number of RX queues used
+ *     @dev: Network device
+ *     @rxq: Actual number of RX queues
+ *
+ *     This must be called either with the rtnl_lock held or before
+ *     registration of the net device.  Returns 0 on success, or a
+ *     negative error code.  If called before registration, it always
+ *     succeeds.
+ */
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
+{
+       int rc;
+
+       if (rxq < 1 || rxq > dev->num_rx_queues)
+               return -EINVAL;
+
+       if (dev->reg_state == NETREG_REGISTERED) {
+               ASSERT_RTNL();
+
+               rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
+                                                 rxq);
+               if (rc)
+                       return rc;
+       }
+
+       dev->real_num_rx_queues = rxq;
+       return 0;
+}
+EXPORT_SYMBOL(netif_set_real_num_rx_queues);
+#endif
+
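Unlike the TX variant, this one can fail even for an in-range count, because resizing a registered device updates the per-queue sysfs kobjects. A hedged calling sketch (hypothetical n/err), doing RX first since it is the call that can fail:

	err = netif_set_real_num_rx_queues(dev, n);	/* may fail on kobject setup */
	if (!err)
		err = netif_set_real_num_tx_queues(dev, n);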
 static inline void __netif_reschedule(struct Qdisc *q)
 {
        struct softnet_data *sd;
@@ -1902,14 +1937,14 @@ static int dev_gso_segment(struct sk_buff *skb)
 
 /*
  * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested, since
- * drivers need to call skb_tstamp_tx() to send the timestamp.
+ * We cannot orphan skb if tx timestamp is requested or the sk-reference
+ * is needed at the driver level for other reasons, e.g. see net/can/raw.c
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
 
-       if (sk && !skb_tx(skb)->flags) {
+       if (sk && !skb_shinfo(skb)->tx_flags) {
                /* skb_tx_hash() won't be able to get sk.
                 * We copy sk_hash into skb->rxhash
                 */
@@ -1930,7 +1965,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
                                      struct net_device *dev)
 {
        return skb_is_nonlinear(skb) &&
-              ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+              ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
                (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
                                              illegal_highdma(dev, skb))));
 }
@@ -2143,6 +2178,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        return rc;
 }
 
+static DEFINE_PER_CPU(int, xmit_recursion);
+#define RECURSION_LIMIT 3
+
 /**
  *     dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
@@ -2208,10 +2246,15 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                if (txq->xmit_lock_owner != cpu) {
 
+                       if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+                               goto recursion_alert;
+
                        HARD_TX_LOCK(dev, txq, cpu);
 
                        if (!netif_tx_queue_stopped(txq)) {
+                               __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
+                               __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
@@ -2223,7 +2266,9 @@ int dev_queue_xmit(struct sk_buff *skb)
                                       "queue packet!\n", dev->name);
                } else {
                        /* Recursion is detected! It is possible,
-                        * unfortunately */
+                        * unfortunately
+                        */
+recursion_alert:
                        if (net_ratelimit())
                                printk(KERN_CRIT "Dead loop on virtual device "
                                       "%s, fix it urgently!\n", dev->name);
@@ -2259,69 +2304,44 @@ static inline void ____napi_schedule(struct softnet_data *sd,
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
 /*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
  */
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-                      struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
 {
+       int nhoff, hash = 0, poff;
        struct ipv6hdr *ip6;
        struct iphdr *ip;
-       struct netdev_rx_queue *rxqueue;
-       struct rps_map *map;
-       struct rps_dev_flow_table *flow_table;
-       struct rps_sock_flow_table *sock_flow_table;
-       int cpu = -1;
        u8 ip_proto;
-       u16 tcpu;
        u32 addr1, addr2, ihl;
        union {
                u32 v32;
                u16 v16[2];
        } ports;
 
-       if (skb_rx_queue_recorded(skb)) {
-               u16 index = skb_get_rx_queue(skb);
-               if (unlikely(index >= dev->num_rx_queues)) {
-                       WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
-                               "on queue %u, but number of RX queues is %u\n",
-                               dev->name, index, dev->num_rx_queues);
-                       goto done;
-               }
-               rxqueue = dev->_rx + index;
-       } else
-               rxqueue = dev->_rx;
-
-       if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
-               goto done;
-
-       if (skb->rxhash)
-               goto got_hash; /* Skip hash computation on packet header */
+       nhoff = skb_network_offset(skb);
 
        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
-               if (!pskb_may_pull(skb, sizeof(*ip)))
+               if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
                        goto done;
 
-               ip = (struct iphdr *) skb->data;
-               ip_proto = ip->protocol;
+               ip = (struct iphdr *) (skb->data + nhoff);
+               if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+                       ip_proto = 0;
+               else
+                       ip_proto = ip->protocol;
                addr1 = (__force u32) ip->saddr;
                addr2 = (__force u32) ip->daddr;
                ihl = ip->ihl;
                break;
        case __constant_htons(ETH_P_IPV6):
-               if (!pskb_may_pull(skb, sizeof(*ip6)))
+               if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
                        goto done;
 
-               ip6 = (struct ipv6hdr *) skb->data;
+               ip6 = (struct ipv6hdr *) (skb->data + nhoff);
                ip_proto = ip6->nexthdr;
                addr1 = (__force u32) ip6->saddr.s6_addr32[3];
                addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,33 +2350,81 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        default:
                goto done;
        }
-       switch (ip_proto) {
-       case IPPROTO_TCP:
-       case IPPROTO_UDP:
-       case IPPROTO_DCCP:
-       case IPPROTO_ESP:
-       case IPPROTO_AH:
-       case IPPROTO_SCTP:
-       case IPPROTO_UDPLITE:
-               if (pskb_may_pull(skb, (ihl * 4) + 4)) {
-                       ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+
+       ports.v32 = 0;
+       poff = proto_ports_offset(ip_proto);
+       if (poff >= 0) {
+               nhoff += ihl * 4 + poff;
+               if (pskb_may_pull(skb, nhoff + 4)) {
+                       ports.v32 = * (__force u32 *) (skb->data + nhoff);
                        if (ports.v16[1] < ports.v16[0])
                                swap(ports.v16[0], ports.v16[1]);
-                       break;
                }
-       default:
-               ports.v32 = 0;
-               break;
        }
 
        /* get a consistent hash (same value on both flow directions) */
        if (addr2 < addr1)
                swap(addr1, addr2);
-       skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
-       if (!skb->rxhash)
-               skb->rxhash = 1;
 
-got_hash:
+       hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+       if (!hash)
+               hash = 1;
+
+done:
+       return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
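Callers normally reach this through a caching wrapper that computes the hash at most once per skb; roughly (as used by get_rps_cpu() below):

	static inline __u32 skb_get_rxhash(struct sk_buff *skb)
	{
		if (!skb->rxhash)
			skb->rxhash = __skb_get_rxhash(skb);

		return skb->rxhash;
	}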
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+                      struct rps_dev_flow **rflowp)
+{
+       struct netdev_rx_queue *rxqueue;
+       struct rps_map *map = NULL;
+       struct rps_dev_flow_table *flow_table;
+       struct rps_sock_flow_table *sock_flow_table;
+       int cpu = -1;
+       u16 tcpu;
+
+       if (skb_rx_queue_recorded(skb)) {
+               u16 index = skb_get_rx_queue(skb);
+               if (unlikely(index >= dev->real_num_rx_queues)) {
+                       WARN_ONCE(dev->real_num_rx_queues > 1,
+                                 "%s received packet on queue %u, but number "
+                                 "of RX queues is %u\n",
+                                 dev->name, index, dev->real_num_rx_queues);
+                       goto done;
+               }
+               rxqueue = dev->_rx + index;
+       } else
+               rxqueue = dev->_rx;
+
+       if (rxqueue->rps_map) {
+               map = rcu_dereference(rxqueue->rps_map);
+               if (map && map->len == 1) {
+                       tcpu = map->cpus[0];
+                       if (cpu_online(tcpu))
+                               cpu = tcpu;
+                       goto done;
+               }
+       } else if (!rxqueue->rps_flow_table) {
+               goto done;
+       }
+
+       skb_reset_network_header(skb);
+       if (!skb_get_rxhash(skb))
+               goto done;
+
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
        sock_flow_table = rcu_dereference(rps_sock_flow_table);
        if (flow_table && sock_flow_table) {
@@ -2396,7 +2464,6 @@ got_hash:
                }
        }
 
-       map = rcu_dereference(rxqueue->rps_map);
        if (map) {
                tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
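The multiply-shift above maps the 32-bit hash onto map->len slots without a division: for example, skb->rxhash = 0x80000000 with map->len = 4 gives ((u64)0x80000000 * 4) >> 32 = 2, splitting the hash space evenly across the configured CPUs.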
 
@@ -2482,6 +2549,7 @@ enqueue:
 
        local_irq_restore(flags);
 
+       atomic_long_inc(&skb->dev->rx_dropped);
        kfree_skb(skb);
        return NET_RX_DROP;
 }
@@ -2636,11 +2704,10 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
  * the ingress scheduler, you just can't add policies on ingress.
  *
  */
-static int ing_filter(struct sk_buff *skb)
+static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
 {
        struct net_device *dev = skb->dev;
        u32 ttl = G_TC_RTTL(skb->tc_verd);
-       struct netdev_queue *rxq;
        int result = TC_ACT_OK;
        struct Qdisc *q;
 
@@ -2654,8 +2721,6 @@ static int ing_filter(struct sk_buff *skb)
        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-       rxq = &dev->rx_queue;
-
        q = rxq->qdisc;
        if (q != &noop_qdisc) {
                spin_lock(qdisc_lock(q));
@@ -2671,7 +2736,9 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
 {
-       if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+       struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+
+       if (!rxq || rxq->qdisc == &noop_qdisc)
                goto out;
 
        if (*pt_prev) {
@@ -2679,7 +2746,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                *pt_prev = NULL;
        }
 
-       switch (ing_filter(skb)) {
+       switch (ing_filter(skb, rxq)) {
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
                kfree_skb(skb);
@@ -2828,8 +2895,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
-       if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
-               return NET_RX_SUCCESS;
+       if (vlan_tx_tag_present(skb))
+               vlan_hwaccel_do_receive(skb);
 
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
@@ -2930,6 +2997,7 @@ ncls:
        if (pt_prev) {
                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
+               atomic_long_inc(&skb->dev->rx_dropped);
                kfree_skb(skb);
                /* Jamal, now you will not be able to escape explaining
                 * me how you were going to use this. :-)
@@ -3050,7 +3118,7 @@ out:
        return netif_receive_skb(skb);
 }
 
-static void napi_gro_flush(struct napi_struct *napi)
+inline void napi_gro_flush(struct napi_struct *napi)
 {
        struct sk_buff *skb, *next;
 
@@ -3063,6 +3131,7 @@ static void napi_gro_flush(struct napi_struct *napi)
        napi->gro_count = 0;
        napi->gro_list = NULL;
 }
+EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -3077,7 +3146,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
                goto normal;
 
-       if (skb_is_gso(skb) || skb_has_frags(skb))
+       if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
 
        rcu_read_lock();
@@ -3156,16 +3225,18 @@ normal:
 }
 EXPORT_SYMBOL(dev_gro_receive);
 
-static gro_result_t
+static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff *p;
 
        for (p = napi->gro_list; p; p = p->next) {
-               NAPI_GRO_CB(p)->same_flow =
-                       (p->dev == skb->dev) &&
-                       !compare_ether_header(skb_mac_header(p),
+               unsigned long diffs;
+
+               diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+               diffs |= compare_ether_header(skb_mac_header(p),
                                              skb_gro_mac_header(skb));
+               NAPI_GRO_CB(p)->same_flow = !diffs;
                NAPI_GRO_CB(p)->flush = 0;
        }
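The diffs accumulation is a branch-free equality test: the pointer XOR is zero only when both skbs arrived on the same device, and compare_ether_header() returns zero only when the MAC headers match, so same_flow ends up set exactly when every field agrees.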
 
@@ -4859,21 +4930,6 @@ static void rollback_registered(struct net_device *dev)
        rollback_registered_many(&single);
 }
 
-static void __netdev_init_queue_locks_one(struct net_device *dev,
-                                         struct netdev_queue *dev_queue,
-                                         void *_unused)
-{
-       spin_lock_init(&dev_queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-       dev_queue->xmit_lock_owner = -1;
-}
-
-static void netdev_init_queue_locks(struct net_device *dev)
-{
-       netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
-       __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
-}
-
 unsigned long netdev_fix_features(unsigned long features, const char *name)
 {
        /* Fix illegal SG+CSUM combinations. */
@@ -4941,6 +4997,66 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
+static int netif_alloc_rx_queues(struct net_device *dev)
+{
+#ifdef CONFIG_RPS
+       unsigned int i, count = dev->num_rx_queues;
+       struct netdev_rx_queue *rx;
+
+       BUG_ON(count < 1);
+
+       rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+       if (!rx) {
+               pr_err("netdev: Unable to allocate %u rx queues.\n", count);
+               return -ENOMEM;
+       }
+       dev->_rx = rx;
+
+       /*
+        * Set a pointer to first element in the array which holds the
+        * reference count.
+        */
+       for (i = 0; i < count; i++)
+               rx[i].first = rx;
+#endif
+       return 0;
+}
+
+static int netif_alloc_netdev_queues(struct net_device *dev)
+{
+       unsigned int count = dev->num_tx_queues;
+       struct netdev_queue *tx;
+
+       BUG_ON(count < 1);
+
+       tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
+       if (!tx) {
+               pr_err("netdev: Unable to allocate %u tx queues.\n",
+                      count);
+               return -ENOMEM;
+       }
+       dev->_tx = tx;
+       return 0;
+}
+
+static void netdev_init_one_queue(struct net_device *dev,
+                                 struct netdev_queue *queue,
+                                 void *_unused)
+{
+       queue->dev = dev;
+
+       /* Initialize queue lock */
+       spin_lock_init(&queue->_xmit_lock);
+       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queues(struct net_device *dev)
+{
+       netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+       spin_lock_init(&dev->tx_global_lock);
+}
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -4974,28 +5090,19 @@ int register_netdevice(struct net_device *dev)
 
        spin_lock_init(&dev->addr_list_lock);
        netdev_set_addr_lockdep_class(dev);
-       netdev_init_queue_locks(dev);
 
        dev->iflink = -1;
 
-#ifdef CONFIG_RPS
-       if (!dev->num_rx_queues) {
-               /*
-                * Allocate a single RX queue if driver never called
-                * alloc_netdev_mq
-                */
+       ret = netif_alloc_rx_queues(dev);
+       if (ret)
+               goto out;
 
-               dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
-               if (!dev->_rx) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+       ret = netif_alloc_netdev_queues(dev);
+       if (ret)
+               goto out;
+
+       netdev_init_queues(dev);
 
-               dev->_rx->first = dev->_rx;
-               atomic_set(&dev->_rx->count, 1);
-               dev->num_rx_queues = 1;
-       }
-#endif
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
@@ -5035,6 +5142,12 @@ int register_netdevice(struct net_device *dev)
        if (dev->features & NETIF_F_SG)
                dev->features |= NETIF_F_GSO;
 
+       /* Enable GRO and NETIF_F_HIGHDMA for vlans by default;
+        * vlan_dev_init() checks dev->features, so these features
+        * are enabled only if the underlying device supports them.
+        */
+       dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
+
        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
        ret = notifier_to_errno(ret);
        if (ret)
@@ -5105,9 +5218,6 @@ int init_dummy_netdev(struct net_device *dev)
         */
        dev->reg_state = NETREG_DUMMY;
 
-       /* initialize the ref count */
-       atomic_set(&dev->refcnt, 1);
-
        /* NAPI wants this */
        INIT_LIST_HEAD(&dev->napi_list);
 
@@ -5115,6 +5225,11 @@ int init_dummy_netdev(struct net_device *dev)
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);
 
+       /* Note : We don't allocate pcpu_refcnt for dummy devices,
+        * because users of this 'device' don't need to change
+        * its refcount.
+        */
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(init_dummy_netdev);
@@ -5156,6 +5271,16 @@ out:
 }
 EXPORT_SYMBOL(register_netdev);
 
+int netdev_refcnt_read(const struct net_device *dev)
+{
+       int i, refcnt = 0;
+
+       for_each_possible_cpu(i)
+               refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
+       return refcnt;
+}
+EXPORT_SYMBOL(netdev_refcnt_read);
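With pcpu_refcnt, dev_hold() and dev_put() become plain per-CPU increments with no shared cacheline to bounce; only netdev_refcnt_read() pays the cost of summing. A sketch of what the helpers (in netdevice.h) reduce to under this scheme:

	static inline void dev_hold(struct net_device *dev)
	{
		this_cpu_inc(*dev->pcpu_refcnt);
	}

	static inline void dev_put(struct net_device *dev)
	{
		this_cpu_dec(*dev->pcpu_refcnt);
	}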
+
 /*
  * netdev_wait_allrefs - wait until all references are gone.
  *
@@ -5170,11 +5295,14 @@ EXPORT_SYMBOL(register_netdev);
 static void netdev_wait_allrefs(struct net_device *dev)
 {
        unsigned long rebroadcast_time, warning_time;
+       int refcnt;
 
        linkwatch_forget_dev(dev);
 
        rebroadcast_time = warning_time = jiffies;
-       while (atomic_read(&dev->refcnt) != 0) {
+       refcnt = netdev_refcnt_read(dev);
+
+       while (refcnt != 0) {
                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
                        rtnl_lock();
 
@@ -5201,11 +5329,13 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
                msleep(250);
 
+               refcnt = netdev_refcnt_read(dev);
+
                if (time_after(jiffies, warning_time + 10 * HZ)) {
                        printk(KERN_EMERG "unregister_netdevice: "
                               "waiting for %s to become free. Usage "
                               "count = %d\n",
-                              dev->name, atomic_read(&dev->refcnt));
+                              dev->name, refcnt);
                        warning_time = jiffies;
                }
        }
@@ -5263,8 +5393,8 @@ void netdev_run_todo(void)
                netdev_wait_allrefs(dev);
 
                /* paranoia */
-               BUG_ON(atomic_read(&dev->refcnt));
-               WARN_ON(dev->ip_ptr);
+               BUG_ON(netdev_refcnt_read(dev));
+               WARN_ON(rcu_dereference_raw(dev->ip_ptr));
                WARN_ON(dev->ip6_ptr);
                WARN_ON(dev->dn_ptr);
 
@@ -5342,30 +5472,34 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 
        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
-               return ops->ndo_get_stats64(dev, storage);
-       }
-       if (ops->ndo_get_stats) {
+               ops->ndo_get_stats64(dev, storage);
+       } else if (ops->ndo_get_stats) {
                netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
-               return storage;
+       } else {
+               netdev_stats_to_stats64(storage, &dev->stats);
+               dev_txq_stats_fold(dev, storage);
        }
-       netdev_stats_to_stats64(storage, &dev->stats);
-       dev_txq_stats_fold(dev, storage);
+       storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
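The fallback order is: a 64-bit driver op if present, else the legacy 32-bit op widened through netdev_stats_to_stats64(), else the core's own dev->stats plus per-queue tx counters; the software rx_dropped count is folded in regardless of which source was used.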
 
-static void netdev_init_one_queue(struct net_device *dev,
-                                 struct netdev_queue *queue,
-                                 void *_unused)
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 {
-       queue->dev = dev;
-}
+       struct netdev_queue *queue = dev_ingress_queue(dev);
 
-static void netdev_init_queues(struct net_device *dev)
-{
-       netdev_init_one_queue(dev, &dev->rx_queue, NULL);
-       netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
-       spin_lock_init(&dev->tx_global_lock);
+#ifdef CONFIG_NET_CLS_ACT
+       if (queue)
+               return queue;
+       queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+       if (!queue)
+               return NULL;
+       netdev_init_one_queue(dev, queue, NULL);
+       queue->qdisc = &noop_qdisc;
+       queue->qdisc_sleeping = &noop_qdisc;
+       rcu_assign_pointer(dev->ingress_queue, queue);
+#endif
+       return queue;
 }
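The rcu_assign_pointer() publication pairs with the rcu_dereference() in handle_ing() above: readers either observe NULL (no ingress qdisc, cheap fast path) or a fully initialized queue, and the structure is only freed in free_netdev() once all references are gone.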
 
 /**
@@ -5382,17 +5516,18 @@ static void netdev_init_queues(struct net_device *dev)
 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                void (*setup)(struct net_device *), unsigned int queue_count)
 {
-       struct netdev_queue *tx;
        struct net_device *dev;
        size_t alloc_size;
        struct net_device *p;
-#ifdef CONFIG_RPS
-       struct netdev_rx_queue *rx;
-       int i;
-#endif
 
        BUG_ON(strlen(name) >= sizeof(dev->name));
 
+       if (queue_count < 1) {
+               pr_err("alloc_netdev: Unable to allocate device "
+                      "with zero queues.\n");
+               return NULL;
+       }
+
        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
@@ -5408,55 +5543,31 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
                return NULL;
        }
 
-       tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
-       if (!tx) {
-               printk(KERN_ERR "alloc_netdev: Unable to allocate "
-                      "tx qdiscs.\n");
-               goto free_p;
-       }
-
-#ifdef CONFIG_RPS
-       rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
-       if (!rx) {
-               printk(KERN_ERR "alloc_netdev: Unable to allocate "
-                      "rx queues.\n");
-               goto free_tx;
-       }
-
-       atomic_set(&rx->count, queue_count);
-
-       /*
-        * Set a pointer to first element in the array which holds the
-        * reference count.
-        */
-       for (i = 0; i < queue_count; i++)
-               rx[i].first = rx;
-#endif
-
        dev = PTR_ALIGN(p, NETDEV_ALIGN);
        dev->padded = (char *)dev - (char *)p;
 
+       dev->pcpu_refcnt = alloc_percpu(int);
+       if (!dev->pcpu_refcnt)
+               goto free_p;
+
        if (dev_addr_init(dev))
-               goto free_rx;
+               goto free_pcpu;
 
        dev_mc_init(dev);
        dev_uc_init(dev);
 
        dev_net_set(dev, &init_net);
 
-       dev->_tx = tx;
        dev->num_tx_queues = queue_count;
        dev->real_num_tx_queues = queue_count;
 
 #ifdef CONFIG_RPS
-       dev->_rx = rx;
        dev->num_rx_queues = queue_count;
+       dev->real_num_rx_queues = queue_count;
 #endif
 
        dev->gso_max_size = GSO_MAX_SIZE;
 
-       netdev_init_queues(dev);
-
        INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
        dev->ethtool_ntuple_list.count = 0;
        INIT_LIST_HEAD(&dev->napi_list);
@@ -5467,12 +5578,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
        strcpy(dev->name, name);
        return dev;
 
-free_rx:
-#ifdef CONFIG_RPS
-       kfree(rx);
-free_tx:
-#endif
-       kfree(tx);
+free_pcpu:
+       free_percpu(dev->pcpu_refcnt);
 free_p:
        kfree(p);
        return NULL;
@@ -5495,6 +5602,8 @@ void free_netdev(struct net_device *dev)
 
        kfree(dev->_tx);
 
+       kfree(rcu_dereference_raw(dev->ingress_queue));
+
        /* Flush device addresses */
        dev_addr_flush(dev);
 
@@ -5504,6 +5613,9 @@ void free_netdev(struct net_device *dev)
        list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
                netif_napi_del(p);
 
+       free_percpu(dev->pcpu_refcnt);
+       dev->pcpu_refcnt = NULL;
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                kfree((char *)dev - dev->padded);
@@ -5658,6 +5770,10 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
        /* Notify protocols that we are about to destroy
           this device. They should clean all the things.
+
+          Note that dev->reg_state stays at NETREG_REGISTERED.
          This is intentional: this way 8021q and macvlan know
+          the device is just moving and can keep their slaves up.
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);