/*
 * xfrm_input.c
 *
 * Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */
#include <linux/bottom_half.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
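/*
 * Transport-mode packets that finish IPsec processing are re-injected
 * into the stack through a per-CPU tasklet rather than by a direct
 * call, so the final input handler runs from a fresh softirq context
 * instead of deep inside the current receive call chain.
 */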
struct xfrm_trans_tasklet {
	struct tasklet_struct tasklet;
	struct sk_buff_head queue;
};
struct xfrm_trans_cb {
	int (*finish)(struct sk_buff *skb);
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
static struct kmem_cache *secpath_cachep __read_mostly;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
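/*
 * Final teardown of a security path: drop the reference held on each
 * xfrm_state in the path, then free the sec_path itself.  Called via
 * secpath_put() once the refcount reaches zero.
 */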
void __secpath_destroy(struct sec_path *sp)
{
	int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);
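/*
 * Duplicate a security path, or allocate a fresh one when @src is NULL.
 * The copy takes its own hold on every xfrm_state it references and
 * starts with a refcount of one.  GFP_ATOMIC, as this runs in the
 * packet receive path.
 */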
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);
/*
 * Fetch spi and seq from ipsec header.  Returns 0 on success, a negative
 * error when the header cannot be pulled, and 1 for protocols that carry
 * no SPI.  IPCOMP carries only a 16-bit CPI, which is widened to an SPI.
 */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
	int offset, offset_seq;
	int hlen;

	switch (nexthdr) {
	case IPPROTO_AH:
		hlen = sizeof(struct ip_auth_hdr);
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		hlen = sizeof(struct ip_esp_hdr);
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;

	*spi = *(__be32 *)(skb_transport_header(skb) + offset);
	*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
	return 0;
}
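/*
 * Prepare the skb for the inner mode's input processing: let the outer
 * mode's address family extract its header data, then resolve the inner
 * mode.  States with a wildcard selector (AF_UNSPEC) pick the inner mode
 * from the inner protocol of the packet itself.
 */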
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_mode *inner_mode = x->inner_mode;
	int err;

	err = x->outer_mode->afinfo->extract_input(x, skb);
	if (err)
		return err;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL)
			return -EAFNOSUPPORT;
	}

	skb->protocol = inner_mode->afinfo->eth_proto;
	return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);
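/*
 * Main IPsec receive path.  Walks the transform chain on the packet:
 * look up the state by (daddr, spi, proto, family), validate state,
 * replay counter and lifetime, run the protocol type's input handler
 * (which may complete asynchronously and re-enter via
 * xfrm_input_resume()), then hand the payload to the inner mode.
 * Tunnel-mode packets are re-injected through netif_rx(); transport
 * mode continues through the address family's transport_finish().
 */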
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x;
	xfrm_address_t *daddr;
	struct xfrm_mode *inner_mode;
	unsigned int family;
	int decaps = 0;
	int async = 0;

	/* A negative encap_type indicates async resumption. */
	if (encap_type < 0) {
		async = 1;
		x = xfrm_input_state(skb);
		seq = XFRM_SKB_CB(skb)->seq.input.low;
		goto resume;
	}

	/* Allocate new secpath or COW existing one. */
	if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	family = XFRM_SPI_SKB_CB(skb)->family;

	seq = 0;
	if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	do {
		if (skb->sp->len == XFRM_MAX_DEPTH) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		skb->sp->xvec[skb->sp->len++] = x;

		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (x->repl->check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		skb_dst_force(skb);
		/* The device must stay around while crypto may run async. */
		dev_hold(skb->dev);

		nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;

resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr <= 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (async && x->repl->recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		x->repl->advance(x, seq);

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL)
				goto drop;
		}

		if (inner_mode->input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->outer_mode->afinfo->family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
	} while (!err);

	nf_reset(skb);

	if (decaps) {
		skb_dst_drop(skb);
		netif_rx(skb);
		return 0;
	} else {
		return x->inner_mode->afinfo->transport_finish(skb, async);
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);
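/*
 * Entry point for asynchronous crypto completions; a negative encap_type
 * makes xfrm_input() resume at the state saved in the skb.
 */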
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
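/*
 * Tasklet callback: splice this CPU's pending queue onto a private list
 * and run each packet's saved finish handler from softirq context.
 */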
static void xfrm_trans_reinject(unsigned long data)
{
	struct xfrm_trans_tasklet *trans = (void *)data;
	struct sk_buff_head queue;
	struct sk_buff *skb;

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&trans->queue, &queue);

	while ((skb = __skb_dequeue(&queue)))
		XFRM_TRANS_SKB_CB(skb)->finish(skb);
}
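/*
 * Queue a packet for deferred reinjection through this CPU's tasklet.
 * The queue is capped at netdev_max_backlog so a flood of transport-mode
 * packets cannot grow it without bound; returns -ENOBUFS when full.
 */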
int xfrm_trans_queue(struct sk_buff *skb, int (*finish)(struct sk_buff *))
{
	struct xfrm_trans_tasklet *trans;

	trans = this_cpu_ptr(&xfrm_trans_tasklet);

	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
		return -ENOBUFS;

	XFRM_TRANS_SKB_CB(skb)->finish = finish;
	__skb_queue_tail(&trans->queue, skb);
	tasklet_schedule(&trans->tasklet);
	return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue);
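/*
 * Usage sketch (illustrative; example_finish is a hypothetical callback,
 * not part of this file): an af-specific transport_finish() can defer its
 * final delivery step instead of invoking it synchronously, dropping the
 * packet if the per-CPU queue is full:
 *
 *	static int example_finish(struct sk_buff *skb)
 *	{
 *		return ip_local_deliver(skb);
 *	}
 *	...
 *	if (xfrm_trans_queue(skb, example_finish))
 *		kfree_skb(skb);
 */

/*
 * Boot-time setup: create the sec_path slab cache and initialize one
 * reinject tasklet and packet queue per possible CPU.
 */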
void __init xfrm_input_init(void)
{
	int i;

	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	for_each_possible_cpu(i) {
		struct xfrm_trans_tasklet *trans;

		trans = &per_cpu(xfrm_trans_tasklet, i);
		__skb_queue_head_init(&trans->queue);
		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
			     (unsigned long)trans);
	}
}