/* net/core/flow_dissector.c */
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* Copy saddr & daddr, possibly using a 64-bit load/store.
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
                     offsetof(typeof(*flow), src) + sizeof(flow->src));
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

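/* skb_flow_dissect - extract basic flow keys from a packet
 *
 * Walks the headers starting at the skb's network offset, peeling off
 * VLAN, PPPoE-session and non-routed GREv0 encapsulation, and fills
 * @flow with the IPv4/IPv6 source and destination, the L4 protocol,
 * the port pair (when the protocol carries one) and the transport
 * header offset.  Returns false if the headers cannot be read.
 */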
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
        int poff, nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;

        memset(flow, 0, sizeof(*flow));

again:
        switch (proto) {
        case __constant_htons(ETH_P_IP): {
                const struct iphdr *iph;
                struct iphdr _iph;
ip:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
                        return false;

                if (ip_is_fragment(iph))
                        ip_proto = 0;
                else
                        ip_proto = iph->protocol;
                iph_to_flow_copy_addrs(flow, iph);
                nhoff += iph->ihl * 4;
                break;
        }
        case __constant_htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
                        return false;

                ip_proto = iph->nexthdr;
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
                break;
        }
        case __constant_htons(ETH_P_8021AD):
        case __constant_htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;

                vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
                if (!vlan)
                        return false;

                proto = vlan->h_vlan_encapsulated_proto;
                nhoff += sizeof(*vlan);
                goto again;
        }
        case __constant_htons(ETH_P_PPP_SES): {
                struct {
                        struct pppoe_hdr hdr;
                        __be16 proto;
                } *hdr, _hdr;
                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                proto = hdr->proto;
                nhoff += PPPOE_SES_HLEN;
                switch (proto) {
                case __constant_htons(PPP_IP):
                        goto ip;
                case __constant_htons(PPP_IPV6):
                        goto ipv6;
                default:
                        return false;
                }
        }
        default:
                return false;
        }

        switch (ip_proto) {
        case IPPROTO_GRE: {
                struct gre_hdr {
                        __be16 flags;
                        __be16 proto;
                } *hdr, _hdr;

                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                /*
                 * Only look inside GRE if version zero and no
                 * routing
                 */
                if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
                        proto = hdr->proto;
                        nhoff += 4;
                        if (hdr->flags & GRE_CSUM)
                                nhoff += 4;
                        if (hdr->flags & GRE_KEY)
                                nhoff += 4;
                        if (hdr->flags & GRE_SEQ)
                                nhoff += 4;
                        if (proto == htons(ETH_P_TEB)) {
                                const struct ethhdr *eth;
                                struct ethhdr _eth;

                                eth = skb_header_pointer(skb, nhoff,
                                                         sizeof(_eth), &_eth);
                                if (!eth)
                                        return false;
                                proto = eth->h_proto;
                                nhoff += sizeof(*eth);
                        }
                        goto again;
                }
                break;
        }
        case IPPROTO_IPIP:
                /* The encapsulated packet is always IPv4; restart at
                 * the IPv4 parser explicitly rather than re-running the
                 * outer switch, which would mis-parse IPv4-in-IPv6.
                 */
                proto = htons(ETH_P_IP);
                goto ip;
        default:
                break;
        }

        flow->ip_proto = ip_proto;
        poff = proto_ports_offset(ip_proto);
        if (poff >= 0) {
                __be32 *ports, _ports;

                nhoff += poff;
                ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
                if (ports)
                        flow->ports = *ports;
        }

        flow->thoff = (u16) nhoff;

        return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
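
/*
 * Illustrative use (hypothetical caller, not part of this file):
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("proto %u src %pI4 dst %pI4 ports %#x\n",
 *			 keys.ip_proto, &keys.src, &keys.dst,
 *			 ntohl(keys.ports));
 */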

static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
 * on success; zero indicates no valid hash.  Also sets l4_rxhash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
        struct flow_keys keys;
        u32 hash;

        if (!skb_flow_dissect(skb, &keys))
                return;

        if (keys.ports)
                skb->l4_rxhash = 1;

        /* get a consistent hash (same value on both flow directions) */
        if (((__force u32)keys.dst < (__force u32)keys.src) ||
            (((__force u32)keys.dst == (__force u32)keys.src) &&
             ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
                swap(keys.dst, keys.src);
                swap(keys.port16[0], keys.port16[1]);
        }

        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src,
                            (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;

        skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
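
/*
 * Note on the swap above: canonically ordering the address and port
 * pairs before hashing makes the hash direction-independent.  For
 * example (addresses hypothetical), 10.0.0.1:80 -> 10.0.0.2:5000 and
 * 10.0.0.2:5000 -> 10.0.0.1:80 both normalize to the same
 * (dst, src, ports) triple and therefore to the same jhash_3words()
 * value.
 */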

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                  unsigned int num_tx_queues)
{
        u32 hash;
        u16 qoffset = 0;
        u16 qcount = num_tx_queues;

        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                while (unlikely(hash >= num_tx_queues))
                        hash -= num_tx_queues;
                return hash;
        }

        if (dev->num_tc) {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
                qoffset = dev->tc_to_txq[tc].offset;
                qcount = dev->tc_to_txq[tc].count;
        }

        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);

        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
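
/*
 * The return statement above maps a 32-bit hash into [0, qcount)
 * without a division: hash * qcount lies in [0, qcount * 2^32), so
 * shifting right by 32 scales it into [0, qcount).  For example, with
 * qcount = 8 a hash of 0x80000000 selects queue 4.
 */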

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that packets can be
 * truncated dynamically and only the headers, not the actual payload,
 * need to be pushed to user space for analysis.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
        struct flow_keys keys;
        u32 poff = 0;

        if (!skb_flow_dissect(skb, &keys))
                return 0;

        poff += keys.thoff;
        switch (keys.ip_proto) {
        case IPPROTO_TCP: {
                const struct tcphdr *tcph;
                struct tcphdr _tcph;

                tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
                if (!tcph)
                        return poff;

                poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
                break;
        }
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
                poff += sizeof(struct udphdr);
                break;
        /* For the rest, we do not really care about header
         * extensions at this point for now.
         */
        case IPPROTO_ICMP:
                poff += sizeof(struct icmphdr);
                break;
        case IPPROTO_ICMPV6:
                poff += sizeof(struct icmp6hdr);
                break;
        case IPPROTO_IGMP:
                poff += sizeof(struct igmphdr);
                break;
        case IPPROTO_DCCP:
                poff += sizeof(struct dccp_hdr);
                break;
        case IPPROTO_SCTP:
                poff += sizeof(struct sctphdr);
                break;
        }

        return poff;
}

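/* Clamp a queue index chosen elsewhere to the device's real Tx queue
 * range; out-of-range values are logged (ratelimited) and redirected
 * to queue 0 instead of being propagated.
 */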
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
                                     dev->name, queue_index,
                                     dev->real_num_tx_queues);
                return 0;
        }
        return queue_index;
}

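/* Pick a Tx queue via XPS (transmit packet steering): under RCU, the
 * map configured for the current CPU yields either its single queue or
 * one chosen by hashing the socket hash (or protocol ^ rxhash as a
 * fallback).  Returns -1 if XPS is not compiled in, not configured for
 * this CPU, or produced a stale out-of-range index.
 */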
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        int queue_index = -1;

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                map = rcu_dereference(
                    dev_maps->cpu_map[raw_smp_processor_id()]);
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
                        else {
                                u32 hash;
                                if (skb->sk && skb->sk->sk_hash)
                                        hash = skb->sk->sk_hash;
                                else
                                        hash = (__force u16) skb->protocol ^
                                            skb->rxhash;
                                hash = jhash_1word(hash, hashrnd);
                                queue_index = map->queues[
                                    ((u64)hash * map->len) >> 32];
                        }
                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
        }
        rcu_read_unlock();

        return queue_index;
#else
        return -1;
#endif
}

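/* Default Tx queue selection: reuse the queue index cached on the
 * socket while it is still valid; otherwise try XPS, fall back to
 * skb_tx_hash(), and cache the new choice on the socket when the skb's
 * dst still matches the socket's cached dst.
 */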
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);

        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
                int new_index = get_xps_queue(dev, skb);
                if (new_index < 0)
                        new_index = skb_tx_hash(dev, skb);

                if (queue_index != new_index && sk) {
                        struct dst_entry *dst =
                                    rcu_dereference_check(sk->sk_dst_cache, 1);

                        /* Cache the queue we are about to use, not the
                         * stale index we are replacing.
                         */
                        if (dst && skb_dst(skb) == dst)
                                sk_tx_queue_set(sk, new_index);
                }

                queue_index = new_index;
        }

        return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);

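/* Select and record the Tx queue for an skb: a driver's own
 * ndo_select_queue() gets first say, the result is capped to the real
 * queue range, and the mapping is stored in the skb before the
 * corresponding netdev_queue is returned.
 */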
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb)
{
        int queue_index = 0;

        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
                        queue_index = ops->ndo_select_queue(dev, skb);
                else
                        queue_index = __netdev_pick_tx(dev, skb);
                queue_index = dev_cap_txqueue(dev, queue_index);
        }

        skb_set_queue_mapping(skb, queue_index);
        return netdev_get_tx_queue(dev, queue_index);
}

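/* Seed the hash secret once at boot.  Running this as a late initcall
 * presumably gives the random pool more time to gather entropy before
 * the seed is drawn.
 */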
static int __init initialize_hashrnd(void)
{
        get_random_bytes(&hashrnd, sizeof(hashrnd));
        return 0;
}

late_initcall_sync(initialize_hashrnd);