net/core/flow_dissector.c (pandora-kernel.git)
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
                     offsetof(typeof(*flow), src) + sizeof(flow->src));
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
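
/* For illustration, a minimal sketch of the trick above: because the
 * BUILD_BUG_ON() guarantees that dst immediately follows src, one memcpy
 * covers both addresses and the compiler may turn it into a single 64bit
 * load/store:
 *
 *	struct flow_keys flow;
 *	struct iphdr iph = {
 *		.saddr = htonl(0x0a000001),	(10.0.0.1)
 *		.daddr = htonl(0x0a000002),	(10.0.0.2)
 *	};
 *
 *	iph_to_flow_copy_addrs(&flow, &iph);
 *	(afterwards flow.src == iph.saddr and flow.dst == iph.daddr)
 */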

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff, where
 * poff is the protocol port offset returned by proto_ports_offset().
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
        int poff = proto_ports_offset(ip_proto);

        if (poff >= 0) {
                __be32 *ports, _ports;

                ports = skb_header_pointer(skb, thoff + poff,
                                           sizeof(_ports), &_ports);
                if (ports)
                        return *ports;
        }

        return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
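
/* For illustration, a minimal usage sketch (assuming TCP, where
 * proto_ports_offset() is 0, so one 32bit load covers both 16bit ports:
 * source port first, destination port second, matching the union layout
 * in struct flow_keys):
 *
 *	union {
 *		__be32 ports;
 *		__be16 port16[2];
 *	} u;
 *
 *	u.ports = skb_flow_get_ports(skb, skb_transport_offset(skb),
 *				     IPPROTO_TCP);
 *	if (u.ports)
 *		pr_debug("sport %u dport %u\n",
 *			 ntohs(u.port16[0]), ntohs(u.port16[1]));
 */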

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
        int nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;

        memset(flow, 0, sizeof(*flow));

again:
        switch (proto) {
        case htons(ETH_P_IP): {
                const struct iphdr *iph;
                struct iphdr _iph;
ip:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph || iph->ihl < 5)
                        return false;
                nhoff += iph->ihl * 4;

                ip_proto = iph->protocol;
                if (ip_is_fragment(iph))
                        ip_proto = 0;

                iph_to_flow_copy_addrs(flow, iph);
                break;
        }
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
                __be32 flow_label;

ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
                        return false;

                ip_proto = iph->nexthdr;
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
                flow_label = ip6_flowlabel(iph);
                if (flow_label) {
                        /* Awesome, the IPv6 packet has a flow label, so we
                         * can use it to represent the ports without any
                         * further dissection.
                         */
                        flow->n_proto = proto;
                        flow->ip_proto = ip_proto;
                        flow->ports = flow_label;
                        flow->thoff = (u16)nhoff;

                        return true;
                }

                break;
        }
        case htons(ETH_P_8021AD):
        case htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;

                vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
                if (!vlan)
                        return false;

                proto = vlan->h_vlan_encapsulated_proto;
                nhoff += sizeof(*vlan);
                goto again;
        }
        case htons(ETH_P_PPP_SES): {
                struct {
                        struct pppoe_hdr hdr;
                        __be16 proto;
                } *hdr, _hdr;
                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                proto = hdr->proto;
                nhoff += PPPOE_SES_HLEN;
                switch (proto) {
                case htons(PPP_IP):
                        goto ip;
                case htons(PPP_IPV6):
                        goto ipv6;
                default:
                        return false;
                }
        }
        default:
                return false;
        }

        switch (ip_proto) {
        case IPPROTO_GRE: {
                struct gre_hdr {
                        __be16 flags;
                        __be16 proto;
                } *hdr, _hdr;

                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                /* Only look inside GRE if the version is zero and there
                 * is no routing present.
                 */
                if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
                        proto = hdr->proto;
                        nhoff += 4;
                        if (hdr->flags & GRE_CSUM)
                                nhoff += 4;
                        if (hdr->flags & GRE_KEY)
                                nhoff += 4;
                        if (hdr->flags & GRE_SEQ)
                                nhoff += 4;
                        if (proto == htons(ETH_P_TEB)) {
                                const struct ethhdr *eth;
                                struct ethhdr _eth;

                                eth = skb_header_pointer(skb, nhoff,
                                                         sizeof(_eth), &_eth);
                                if (!eth)
                                        return false;
                                proto = eth->h_proto;
                                nhoff += sizeof(*eth);
                        }
                        goto again;
                }
                break;
        }
        case IPPROTO_IPIP:
                proto = htons(ETH_P_IP);
                goto ip;
        case IPPROTO_IPV6:
                proto = htons(ETH_P_IPV6);
                goto ipv6;
        default:
                break;
        }

        flow->n_proto = proto;
        flow->ip_proto = ip_proto;
        flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16)nhoff;

        return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
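
/* For illustration, the typical calling pattern: fill a flow_keys on the
 * stack and only trust its contents when dissection succeeds.
 * __skb_get_hash() and __skb_get_poff() below both follow it:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("ip_proto %u thoff %u ports %08x\n",
 *			 keys.ip_proto, keys.thoff, ntohl(keys.ports));
 */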

static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
        net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
        __flow_hash_secret_init();
        return jhash_3words(a, b, c, hashrnd);
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
        u32 hash;

        /* get a consistent hash (same value on both flow directions) */
        if (((__force u32)keys->dst < (__force u32)keys->src) ||
            (((__force u32)keys->dst == (__force u32)keys->src) &&
             ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
                swap(keys->dst, keys->src);
                swap(keys->port16[0], keys->port16[1]);
        }

        hash = __flow_hash_3words((__force u32)keys->dst,
                                  (__force u32)keys->src,
                                  (__force u32)keys->ports);
        if (!hash)
                hash = 1;

        return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
        return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);
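
/* For illustration: the canonical ordering in __flow_hash_from_keys()
 * makes the hash direction-independent, so both halves of a connection
 * hash to the same value (a sketch, with hypothetical saddr/daddr and
 * sport/dport values):
 *
 *	struct flow_keys fwd = { .src = saddr, .dst = daddr,
 *				 .port16 = { sport, dport } };
 *	struct flow_keys rev = { .src = daddr, .dst = saddr,
 *				 .port16 = { dport, sport } };
 *
 *	flow_hash_from_keys(&fwd) == flow_hash_from_keys(&rev)
 */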

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to a non-zero hash value
 * on success; zero indicates no valid hash.  Also sets l4_hash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (!skb_flow_dissect(skb, &keys))
                return;

        if (keys.ports)
                skb->l4_hash = 1;

        skb->sw_hash = 1;

        skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);
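
/* Callers normally go through the skb_get_hash() inline from
 * <linux/skbuff.h>, which returns the cached skb->hash when one is
 * already valid and only falls back to the function above, roughly:
 *
 *	static inline __u32 skb_get_hash(struct sk_buff *skb)
 *	{
 *		if (!skb->l4_hash && !skb->sw_hash)
 *			__skb_get_hash(skb);
 *		return skb->hash;
 *	}
 */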

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues)
{
        u32 hash;
        u16 qoffset = 0;
        u16 qcount = num_tx_queues;

        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                while (unlikely(hash >= num_tx_queues))
                        hash -= num_tx_queues;
                return hash;
        }

        if (dev->num_tc) {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
                qoffset = dev->tc_to_txq[tc].offset;
                qcount = dev->tc_to_txq[tc].count;
        }

        return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
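
/* For illustration: the multiply-shift in the return statement above maps
 * a uniform 32bit hash onto [0, qcount) without a (slower) modulo, e.g.
 * with qcount == 8:
 *
 *	hash = 0x00000000  ->  ((u64)hash * 8) >> 32 == 0
 *	hash = 0x80000000  ->  ((u64)hash * 8) >> 32 == 4
 *	hash = 0xffffffff  ->  ((u64)hash * 8) >> 32 == 7
 *
 * so the queues are hit evenly as long as the hash itself is uniform.
 */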

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push the actual payload to user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
        struct flow_keys keys;
        u32 poff = 0;

        if (!skb_flow_dissect(skb, &keys))
                return 0;

        poff += keys.thoff;
        switch (keys.ip_proto) {
        case IPPROTO_TCP: {
                const struct tcphdr *tcph;
                struct tcphdr _tcph;

                tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
                if (!tcph)
                        return poff;

                poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
                break;
        }
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
                poff += sizeof(struct udphdr);
                break;
        /* For the rest, we do not really care about header
         * extensions at this point.
         */
        case IPPROTO_ICMP:
                poff += sizeof(struct icmphdr);
                break;
        case IPPROTO_ICMPV6:
                poff += sizeof(struct icmp6hdr);
                break;
        case IPPROTO_IGMP:
                poff += sizeof(struct igmphdr);
                break;
        case IPPROTO_DCCP:
                poff += sizeof(struct dccp_hdr);
                break;
        case IPPROTO_SCTP:
                poff += sizeof(struct sctphdr);
                break;
        }

        return poff;
}
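
/* For illustration: for a TCP segment behind a plain 20 byte IPv4 header,
 * keys.thoff points just past the IP header; with a TCP data offset
 * (doff) of 8 words, the payload then starts at
 *
 *	poff = keys.thoff + max(sizeof(struct tcphdr), 8 * 4)
 *	     = keys.thoff + 32
 *
 * which is what a BPF user would take as the truncation length for a
 * header-only capture.
 */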

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        int queue_index = -1;

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                map = rcu_dereference(
                    dev_maps->cpu_map[raw_smp_processor_id()]);
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
                        else
                                queue_index = map->queues[
                                    ((u64)skb_get_hash(skb) * map->len) >> 32];

                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
        }
        rcu_read_unlock();

        return queue_index;
#else
        return -1;
#endif
}

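/* Default queue selection: reuse the queue cached on the socket unless it
 * is stale or the flow may safely be re-steered (skb->ooo_okay); otherwise
 * consult XPS and finally fall back to the flow hash via skb_tx_hash(),
 * caching the result on the socket for subsequent packets.
 */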
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);

        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
                int new_index = get_xps_queue(dev, skb);
                if (new_index < 0)
                        new_index = skb_tx_hash(dev, skb);

                if (queue_index != new_index && sk &&
                    rcu_access_pointer(sk->sk_dst_cache))
                        sk_tx_queue_set(sk, new_index);

                queue_index = new_index;
        }

        return queue_index;
}

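/* Transmit-path entry point: let the driver pick the queue through its
 * ndo_select_queue() callback when it has one (with __netdev_pick_tx as
 * the fallback), cap the result to the device's real queue count, and
 * record the mapping in the skb.
 */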
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb,
                                    void *accel_priv)
{
        int queue_index = 0;

        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
                if (ops->ndo_select_queue)
                        queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
                                                            __netdev_pick_tx);
                else
                        queue_index = __netdev_pick_tx(dev, skb);

                if (!accel_priv)
                        queue_index = netdev_cap_txqueue(dev, queue_index);
        }

        skb_set_queue_mapping(skb, queue_index);
        return netdev_get_tx_queue(dev, queue_index);
}
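
/* For illustration: the core transmit path resolves the queue with this
 * helper before handing the skb to the qdisc layer, roughly (see
 * __dev_queue_xmit() in net/core/dev.c):
 *
 *	txq = netdev_pick_tx(dev, skb, accel_priv);
 *	q = rcu_dereference_bh(txq->qdisc);
 */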