net: reject creation of netdev names with colons
[pandora-kernel.git] net/core/dev.c
index abe1147..1c0d862 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -859,7 +859,7 @@ int dev_valid_name(const char *name)
                return 0;
 
        while (*name) {
-               if (*name == '/' || isspace(*name))
+               if (*name == '/' || *name == ':' || isspace(*name))
                        return 0;
                name++;
        }
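
For illustration, the rule this hunk enforces can be mirrored in a small stand-alone check; the helper below is hypothetical user-space code, not part of the patch. Rejecting ':' matters because colons collide with interface alias notation (e.g. eth0:1) and with colon-delimited interface-name parsing elsewhere in the stack.

/* Hypothetical user-space mirror of the check above: reject names
 * containing '/', ':' or whitespace (the kernel's dev_valid_name()
 * additionally limits the length and forbids "." and ".."). */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

static bool ifname_ok(const char *name)
{
        if (*name == '\0')
                return false;

        while (*name) {
                if (*name == '/' || *name == ':' ||
                    isspace((unsigned char)*name))
                        return false;
                name++;
        }
        return true;
}

int main(void)
{
        printf("eth0   -> %d\n", ifname_ok("eth0"));   /* 1: accepted */
        printf("eth0:1 -> %d\n", ifname_ok("eth0:1")); /* 0: rejected */
        return 0;
}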
@@ -1616,9 +1616,11 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        skb->mark = 0;
        secpath_reset(skb);
        nf_reset(skb);
+       nf_reset_trace(skb);
        return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
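
For context, dev_forward_skb() is what pair devices such as veth use to inject a frame into their peer; the added lines adjust the checksum for the Ethernet header pulled by eth_type_trans() and clear netfilter trace state before re-injection. Below is a minimal sketch of a caller, framed as a hypothetical rx_handler; the name peer_forward_rx and the use of rx_handler_data to hold the peer are illustrative assumptions, not taken from this patch.

/* Hypothetical rx_handler that forwards every frame to a peer device
 * stored in rx_handler_data (illustrative only; real drivers such as
 * veth call dev_forward_skb() from their transmit path instead). */
#include <linux/netdevice.h>

static rx_handler_result_t peer_forward_rx(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *peer = rcu_dereference(skb->dev->rx_handler_data);

        if (!peer)
                return RX_HANDLER_PASS;

        /* dev_forward_skb() consumes the skb on both success and drop,
         * so the frame must be reported as consumed either way. */
        dev_forward_skb(peer, skb);
        return RX_HANDLER_CONSUMED;
}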
@@ -1633,7 +1635,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 
 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 {
-       if (ptype->af_packet_priv == NULL)
+       if (!ptype->af_packet_priv || !skb->sk)
                return false;
 
        if (ptype->id_match)
@@ -2127,11 +2129,13 @@ u32 netif_skb_features(struct sk_buff *skb)
        if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
-       if (protocol == htons(ETH_P_8021Q)) {
-               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-               protocol = veh->h_vlan_encapsulated_proto;
-       } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, protocol, features);
+       if (!vlan_tx_tag_present(skb)) {
+               if (unlikely(protocol == htons(ETH_P_8021Q))) {
+                       struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+                       protocol = veh->h_vlan_encapsulated_proto;
+               } else {
+                       return harmonize_features(skb, protocol, features);
+               }
        }
 
        features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
@@ -2608,6 +2612,8 @@ ip:
                        goto done;
 
                ip = (const struct iphdr *) (skb->data + nhoff);
+               if (ip->ihl < 5)
+                       goto done;
                if (ip_is_fragment(ip))
                        ip_proto = 0;
                else
@@ -2829,8 +2835,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-                     rflow->last_qtail)) >= 0))
+                     rflow->last_qtail)) >= 0)) {
+                       tcpu = next_cpu;
                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+               }
 
                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
                        *rflowp = rflow;
@@ -3191,6 +3199,7 @@ int netdev_rx_handler_register(struct net_device *dev,
        if (dev->rx_handler)
                return -EBUSY;
 
+       /* Note: rx_handler_data must be set before rx_handler */
        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
        rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3211,6 +3220,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
        ASSERT_RTNL();
        RCU_INIT_POINTER(dev->rx_handler, NULL);
+       /* A reader that sees a non-NULL rx_handler inside an
+        * rcu_read_lock() section is guaranteed to also see a
+        * non-NULL rx_handler_data.
+        */
+       synchronize_net();
        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
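
The ordering comment and the synchronize_net() call are easiest to read against the consumer side: the receive path dereferences rx_handler first and only then relies on rx_handler_data. A hedged sketch of that reader pattern follows, simplified from the __netif_receive_skb() path later in this file; the helper name run_rx_handler is illustrative.

/* Illustrative reader (must run inside rcu_read_lock(), as the receive
 * path does): because register publishes rx_handler_data before
 * rx_handler, and unregister clears rx_handler and then waits a grace
 * period before clearing rx_handler_data, seeing a non-NULL handler
 * here implies the handler data is still valid. */
#include <linux/netdevice.h>

static rx_handler_result_t run_rx_handler(struct sk_buff **pskb)
{
        rx_handler_func_t *rx_handler;

        rx_handler = rcu_dereference((*pskb)->dev->rx_handler);
        if (!rx_handler)
                return RX_HANDLER_PASS;

        /* the handler typically looks up its private state with
         * rcu_dereference(dev->rx_handler_data) */
        return rx_handler(pskb);
}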
@@ -3278,18 +3292,18 @@ another_round:
 ncls:
 #endif
 
-       rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (vlan_tx_tag_present(skb)) {
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
-               if (vlan_do_receive(&skb, !rx_handler))
+               if (vlan_do_receive(&skb))
                        goto another_round;
                else if (unlikely(!skb))
                        goto out;
        }
 
+       rx_handler = rcu_dereference(skb->dev->rx_handler);
        if (rx_handler) {
                if (pt_prev) {
                        ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3297,6 +3311,7 @@ ncls:
                }
                switch (rx_handler(&skb)) {
                case RX_HANDLER_CONSUMED:
+                       ret = NET_RX_SUCCESS;
                        goto out;
                case RX_HANDLER_ANOTHER:
                        goto another_round;
@@ -3309,6 +3324,9 @@ ncls:
                }
        }
 
+       if (vlan_tx_nonzero_tag_present(skb))
+               skb->pkt_type = PACKET_OTHERHOST;
+
        /* deliver only exact match when indicated */
        null_or_dev = deliver_exact ? skb->dev : NULL;
 
@@ -3633,6 +3651,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+       skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
        napi->skb = skb;
 }
@@ -4502,7 +4521,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
 
-       if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
+       if (ops->ndo_change_rx_flags)
                ops->ndo_change_rx_flags(dev, flags);
 }
 
@@ -6319,10 +6338,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                oldsd->output_queue = NULL;
                oldsd->output_queue_tailp = &oldsd->output_queue;
        }
-       /* Append NAPI poll list from offline CPU. */
-       if (!list_empty(&oldsd->poll_list)) {
-               list_splice_init(&oldsd->poll_list, &sd->poll_list);
-               raise_softirq_irqoff(NET_RX_SOFTIRQ);
+       /* Append NAPI poll list from offline CPU, with one exception:
+        * process_backlog() must be called by the cpu owning the percpu
+        * backlog. We properly handle process_queue & input_pkt_queue later.
+        */
+       while (!list_empty(&oldsd->poll_list)) {
+               struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+                                                           struct napi_struct,
+                                                           poll_list);
+
+               list_del_init(&napi->poll_list);
+               if (napi->poll == process_backlog)
+                       napi->state = 0;
+               else
+                       ____napi_schedule(sd, napi);
        }
 
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -6333,7 +6362,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }
-       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+       while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx(skb);
                input_queue_head_incr(oldsd);
        }