Merge branch 'akpm' (Andrew's patch-bomb)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1cb0d8a..0cb3fe8 100644
@@ -1172,6 +1172,7 @@ static int __dev_open(struct net_device *dev)
                net_dmaengine_get();
                dev_set_rx_mode(dev);
                dev_activate(dev);
+               add_device_randomness(dev->dev_addr, dev->addr_len);
        }
 
        return ret;
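
The add_device_randomness() calls in this commit (here, and in the
dev_set_mac_address() and register_netdevice() hunks below) feed the
interface's MAC address into the entropy pool whenever a device is opened,
registered, or has its address changed. The data is mixed in without
crediting any entropy; it merely perturbs the pool with device-specific
state. A sketch of how any driver could feed similar identifiers via the
same helper (declared in <linux/random.h>); the "serial" field is
hypothetical:

        /* Mix board-specific, not-necessarily-random bytes into the
         * entropy pool; "priv->serial" is an illustrative driver field. */
        add_device_randomness(priv->serial, sizeof(priv->serial));
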
@@ -1632,6 +1633,8 @@ static inline int deliver_skb(struct sk_buff *skb,
                              struct packet_type *pt_prev,
                              struct net_device *orig_dev)
 {
+       if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+               return -ENOMEM;
        atomic_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
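
The new skb_orphan_frags() guard keeps packet taps from holding on to
zero-copy skbs whose fragment pages still belong to userspace. At the time
of this series the helper is roughly the following inline (a sketch, not a
verbatim copy):

        static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
        {
                /* Only zero-copy skbs reference userspace pages. */
                if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                        return 0;
                /* Replace the userspace fragments with private copies. */
                return skb_copy_ubufs(skb, gfp_mask);
        }
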
@@ -1691,7 +1694,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
        rcu_read_unlock();
 }
 
-/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+/**
+ * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
  * @dev: Network device
  * @txq: number of queues available
  *
@@ -1793,6 +1797,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
 #endif
 
+/**
+ * netif_get_num_default_rss_queues - default number of RSS queues
+ *
+ * This routine should set an upper limit on the number of RSS queues
+ * used by default by multiqueue devices.
+ */
+int netif_get_num_default_rss_queues(void)
+{
+       return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+}
+EXPORT_SYMBOL(netif_get_num_default_rss_queues);
+
 static inline void __netif_reschedule(struct Qdisc *q)
 {
        struct softnet_data *sd;
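
Drivers are expected to clamp their hardware queue count against this new
helper rather than using num_online_cpus() directly. A minimal sketch,
where "hw_max_queues" stands in for a hypothetical device limit:

        /* Cap the RSS queue count at the stack-wide default;
         * "hw_max_queues" is an illustrative hardware maximum. */
        int num_rss = min_t(int, hw_max_queues,
                            netif_get_num_default_rss_queues());
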
@@ -2458,6 +2474,23 @@ static void skb_update_prio(struct sk_buff *skb)
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
+/**
+ *     dev_loopback_xmit - loop back @skb
+ *     @skb: buffer to transmit
+ */
+int dev_loopback_xmit(struct sk_buff *skb)
+{
+       skb_reset_mac_header(skb);
+       __skb_pull(skb, skb_network_offset(skb));
+       skb->pkt_type = PACKET_LOOPBACK;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       WARN_ON(!skb_dst(skb));
+       skb_dst_force(skb);
+       netif_rx_ni(skb);
+       return 0;
+}
+EXPORT_SYMBOL(dev_loopback_xmit);
+
 /**
  *     dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
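
dev_loopback_xmit() consolidates the near-identical loopback helpers that
the IPv4 and IPv6 multicast output paths previously carried. Callers pass
it to NF_HOOK() as the output function when looping a copy of a multicast
packet back to local listeners; roughly how the IPv4 path invokes it after
this change:

        /* Sketch of the multicast loopback call site: "newskb" is the
         * cloned copy destined for local delivery. */
        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, NULL,
                newskb->dev, dev_loopback_xmit);
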
@@ -3123,6 +3156,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
+/*
+ * Limit the use of PFMEMALLOC reserves to those protocols that implement
+ * the special handling of PFMEMALLOC skbs.
+ */
+static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case __constant_htons(ETH_P_ARP):
+       case __constant_htons(ETH_P_IP):
+       case __constant_htons(ETH_P_IPV6):
+       case __constant_htons(ETH_P_8021Q):
+               return true;
+       default:
+               return false;
+       }
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
        struct packet_type *ptype, *pt_prev;
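
These protocols are the only ones a PFMEMALLOC skb may reach, and the
receiving socket must additionally have opted in to using the memory
reserves. The same patch series adds sk_set_memalloc()/sk_clear_memalloc()
for that purpose; a sketch of how a swap-over-network transport would flag
its socket ("sk" being whatever struct sock the transport owns):

        /* Allow this socket to dip into the PFMEMALLOC reserves. */
        sk_set_memalloc(sk);
        ...
        /* Drop the privilege once the swap file is deactivated. */
        sk_clear_memalloc(sk);
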
@@ -3132,17 +3182,28 @@ static int __netif_receive_skb(struct sk_buff *skb)
        bool deliver_exact = false;
        int ret = NET_RX_DROP;
        __be16 type;
+       unsigned long pflags = current->flags;
 
        net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
        trace_netif_receive_skb(skb);
 
+       /*
+        * PFMEMALLOC skbs are special, they should
+        * - be delivered to SOCK_MEMALLOC sockets only
+        * - stay away from userspace
+        * - have bounded memory usage
+        *
+        * Use PF_MEMALLOC as this saves us from propagating the allocation
+        * context down to all allocation sites.
+        */
+       if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+               current->flags |= PF_MEMALLOC;
+
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
-               return NET_RX_DROP;
+               goto out;
 
-       if (!skb->skb_iif)
-               skb->skb_iif = skb->dev->ifindex;
        orig_dev = skb->dev;
 
        skb_reset_network_header(skb);
@@ -3154,13 +3215,14 @@ static int __netif_receive_skb(struct sk_buff *skb)
        rcu_read_lock();
 
 another_round:
+       skb->skb_iif = skb->dev->ifindex;
 
        __this_cpu_inc(softnet_data.processed);
 
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
                skb = vlan_untag(skb);
                if (unlikely(!skb))
-                       goto out;
+                       goto unlock;
        }
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3170,6 +3232,9 @@ another_round:
        }
 #endif
 
+       if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+               goto skip_taps;
+
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev)
@@ -3178,13 +3243,18 @@ another_round:
                }
        }
 
+skip_taps:
 #ifdef CONFIG_NET_CLS_ACT
        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
-               goto out;
+               goto unlock;
 ncls:
 #endif
 
+       if (sk_memalloc_socks() && skb_pfmemalloc(skb)
+                               && !skb_pfmemalloc_protocol(skb))
+               goto drop;
+
        rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (vlan_tx_tag_present(skb)) {
                if (pt_prev) {
@@ -3194,7 +3264,7 @@ ncls:
                if (vlan_do_receive(&skb, !rx_handler))
                        goto another_round;
                else if (unlikely(!skb))
-                       goto out;
+                       goto unlock;
        }
 
        if (rx_handler) {
@@ -3204,7 +3274,7 @@ ncls:
                }
                switch (rx_handler(&skb)) {
                case RX_HANDLER_CONSUMED:
-                       goto out;
+                       goto unlock;
                case RX_HANDLER_ANOTHER:
                        goto another_round;
                case RX_HANDLER_EXACT:
@@ -3232,8 +3302,12 @@ ncls:
        }
 
        if (pt_prev) {
-               ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+                       ret = -ENOMEM;
+               else
+                       ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
+drop:
                atomic_long_inc(&skb->dev->rx_dropped);
                kfree_skb(skb);
                /* Jamal, now you will not be able to escape explaining
@@ -3242,8 +3316,10 @@ ncls:
                ret = NET_RX_DROP;
        }
 
-out:
+unlock:
        rcu_read_unlock();
+out:
+       tsk_restore_flags(current, pflags, PF_MEMALLOC);
        return ret;
 }
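
The out: label now restores PF_MEMALLOC via tsk_restore_flags() rather
than clearing it unconditionally, so a caller that already ran with
PF_MEMALLOC keeps it. The helper, added elsewhere in this series, is
roughly:

        static inline void tsk_restore_flags(struct task_struct *task,
                                             unsigned long orig_flags,
                                             unsigned long flags)
        {
                /* Clear the bits we may have set... */
                task->flags &= ~flags;
                /* ...then put back whatever the caller had in them. */
                task->flags |= orig_flags & flags;
        }
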
 
@@ -4767,6 +4843,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
        err = ops->ndo_set_mac_address(dev, sa);
        if (!err)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       add_device_randomness(dev->dev_addr, dev->addr_len);
        return err;
 }
 EXPORT_SYMBOL(dev_set_mac_address);
@@ -5545,6 +5622,7 @@ int register_netdevice(struct net_device *dev)
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
+       add_device_randomness(dev->dev_addr, dev->addr_len);
 
        /* Notify protocols, that a new device appeared. */
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -5646,7 +5724,7 @@ int netdev_refcnt_read(const struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_refcnt_read);
 
-/*
+/**
  * netdev_wait_allrefs - wait until all references are gone.
  *
  * This is called when unregistering network devices.