Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus
[pandora-kernel.git] / net / 8021q / vlan_core.c
1 #include <linux/skbuff.h>
2 #include <linux/netdevice.h>
3 #include <linux/if_vlan.h>
4 #include <linux/netpoll.h>
5 #include "vlan.h"
6
/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	struct net_device *vlan_dev;
	u16 vlan_id;

	/* Give netpoll first shot at the frame; it consumes it if active. */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	/* On a bonding slave that should not deliver this frame, mark it
	 * so only exact-match (no-wildcard) handlers see it. */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	/* Store the hw-accel tag in the skb, then look up the VLAN device
	 * registered in @grp for this VID (low 12 bits of the TCI). */
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		/* Tagged frame for a VID we have no device for: drop it
		 * unless the underlying device is promiscuous, in which
		 * case pass it up marked as not-for-us. */
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	/* polling != 0 selects the NAPI delivery path. */
	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
40
/* Finish hw-accelerated reception on the VLAN device: deliver the frame
 * to taps on the real device, map the ingress priority, consume the tag
 * and update the VLAN device's per-cpu rx statistics.  Always returns 0. */
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats     *rx_stats;

	/* Temporarily point at the underlying device so protocol taps
	 * (packet sockets etc.) see the frame on the real interface. */
	skb->dev = vlan_dev_info(dev)->real_dev;
	netif_nit_deliver(skb);

	skb->dev = dev;
	/* Map the 802.1p priority bits of the TCI to skb->priority,
	 * then consume the tag. */
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);

	/* syncp lets readers on 32-bit get consistent 64-bit counters. */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);
	return 0;
}
77
78 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
79 {
80         return vlan_dev_info(dev)->real_dev;
81 }
82 EXPORT_SYMBOL(vlan_dev_real_dev);
83
84 u16 vlan_dev_vlan_id(const struct net_device *dev)
85 {
86         return vlan_dev_info(dev)->vlan_id;
87 }
88 EXPORT_SYMBOL(vlan_dev_vlan_id);
89
/* Common part of the GRO receive paths: tag the skb, resolve the VLAN
 * device and feed the frame to the generic GRO engine.  Mirrors the
 * bonding/promisc checks done in __vlan_hwaccel_rx(). */
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct net_device *vlan_dev;
	u16 vlan_id;

	/* Mark frames a bonding slave should not deliver to wildcard
	 * handlers. */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	/* Store the tag and look up the VLAN device for this VID. */
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		/* Unknown VID: only promiscuous underlying devices get to
		 * see the frame, flagged as not addressed to us. */
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	/* Re-evaluate same_flow for packets already held on the GRO list,
	 * since skb->dev may just have been switched above. */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	/* No kfree here — NOTE(review): presumably the napi_*_finish()
	 * callers free the skb on GRO_DROP; confirm against callers. */
	return GRO_DROP;
}
126
/* GRO entry point for a hw-accel tagged frame held in a regular skb. */
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	/* netpoll cannot deal with GRO'd skbs: fall back to the plain
	 * hwaccel receive path when netpoll is active on this device. */
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
139
/* GRO entry point for a tagged frame built from the napi frags area
 * (driver placed the data in napi->skb's fragments). */
gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	/* As in vlan_gro_receive(): netpoll cannot handle GRO, so pull the
	 * protocol out of the frame and use the plain hwaccel path. */
	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);
157 EXPORT_SYMBOL(vlan_gro_frags);