xfrm: Use __skb_queue_tail in xfrm_trans_queue
[pandora-kernel.git] net/xfrm/xfrm_input.c
/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/bottom_half.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

/* Per-CPU queue of skbs deferred for reinjection from tasklet context. */
struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};

/* Per-skb state, stashed in skb->cb while the packet sits on the queue. */
struct xfrm_trans_cb {
	int (*finish)(struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static struct kmem_cache *secpath_cachep __read_mostly;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);

struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);
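
/*
 * Illustrative sketch, not part of this file: secpath_dup() is the
 * copy-on-write half of secpath handling.  A caller that may share
 * skb->sp takes a private copy roughly like this (mirrors the COW
 * block in xfrm_input() below):
 *
 *	struct sec_path *sp = secpath_dup(skb->sp);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	if (skb->sp)
 *		secpath_put(skb->sp);	(drop the shared reference)
 *	skb->sp = sp;			(private copy, refcnt == 1)
 */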

/* Fetch spi and seq from ipsec header */

int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32*)(skb_transport_header(skb) + offset);
	*seq = *(__be32*)(skb_transport_header(skb) + offset_seq);
	return 0;
}
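
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * needs the SPI/sequence pair of an ESP packet before a state lookup.
 * Note the return convention: 0 on success, -EINVAL if the header cannot
 * be pulled, and 1 for an unhandled nexthdr.
 *
 *	__be32 spi, seq;
 *
 *	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
 *		goto drop;
 *	x = xfrm_state_lookup(net, skb->mark, daddr, spi,
 *			      IPPROTO_ESP, AF_INET);
 */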

/*
 * Run the outer mode's af-specific input extraction, then select the
 * inner mode (per-packet for states with an AF_UNSPEC selector) and
 * hand the packet to its input2 handler.
 */
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);

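/*
 * Main IPsec receive path.  In outline:
 *
 *  - encap_type < 0 flags resumption after asynchronous crypto; jump
 *    straight to the post-decryption processing at "resume".
 *  - Otherwise COW the secpath, look the state up by
 *    (mark, daddr, spi, proto, family), run replay/lifetime checks and
 *    feed the packet to the type handler (AH/ESP/IPcomp), which returns
 *    -EINPROGRESS when the crypto completes asynchronously.
 *  - Loop while the decapsulated payload is itself an IPsec header
 *    (nesting is bounded by XFRM_MAX_DEPTH).
 *  - Tunnel mode reinjects the packet via netif_rx(); transport mode
 *    finishes through the af-specific transport_finish handler.
 */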
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);
		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);
		dev_hold(skb->dev);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
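
/*
 * Illustrative sketch, not part of this file: an ESP implementation
 * resumes the receive path from its async crypto completion roughly
 * like this (esp_input_done2() here is a hypothetical post-processing
 * helper returning the inner nexthdr or a negative errno):
 *
 *	static void esp_input_done(struct crypto_async_request *base, int err)
 *	{
 *		struct sk_buff *skb = base->data;
 *
 *		xfrm_input_resume(skb, esp_input_done2(skb, err));
 *	}
 *
 * The nexthdr value is then re-examined by xfrm_input() under x->lock
 * right after the "resume" label above.
 */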

/*
 * Transport-mode reinjection.  Packets are parked on a per-CPU queue
 * and drained from a tasklet so that the remainder of the receive path
 * runs in softirq context.  Both the producer (xfrm_trans_queue) and
 * the consumer (the tasklet) only touch the queue on the local CPU with
 * BHs disabled, so the unlocked __skb_queue_tail()/__skb_dequeue()
 * variants are sufficient and no queue lock is taken.
 */
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(skb);
}

int xfrm_trans_queue(struct sk_buff *skb, int (*finish)(struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	/* Bound the backlog; the caller is expected to free the skb on error. */
	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);
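
/*
 * Illustrative sketch, not part of this file: an af-specific
 * transport_finish handler defers its final processing step through the
 * per-CPU tasklet (hypothetical, modelled on the IPv4 path):
 *
 *	if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish))
 *		kfree_skb(skb);		(queue full: -ENOBUFS)
 *	return 0;
 */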

void __init xfrm_input_init(void)
{
	int i;

	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}