net/ipv6/reassembly.c
/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

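/* Per-fragment state kept in skb->cb while the fragment sits on a
 * reassembly queue: the usual inet6 control block plus the fragment's
 * byte offset within the original datagram.
 */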
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

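/* Per-namespace counters exposed to the IPv6 statistics/proc code:
 * the number of reassembly queues currently in use and the memory
 * charged to reassembly.
 */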
int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside ip6_frags.lock,
 * as doing so could race with the hash secret (ip6_frags.rnd) being
 * recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

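/* Match callback for the inet_frag core: two fragments belong to the
 * same datagram iff the fragment id, the defrag user, and both
 * addresses all agree.
 */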
bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return	fq->id == arg->id &&
		fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill the fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

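/* Called under memory pressure: let the inet_frag core evict queues in
 * LRU order and account each evicted queue as a reassembly failure.
 */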
static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

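/* Timer callback: the reassembly timeout for this queue has expired.
 * Kill the queue and, if the first fragment did arrive, send an ICMPv6
 * Time Exceeded (fragment reassembly time exceeded) error back to the
 * source.
 */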
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/*
	 * But use the device on which the LAST segment arrived as the
	 * source device, and do not use a cached fq->dev pointer
	 * directly; the device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

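/* Look up the reassembly queue for (id, src, dst) in this namespace,
 * creating a new one if none exists yet.  Returns the queue with a
 * reference held, or NULL on allocation failure.
 */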
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

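/* Queue one fragment: validate its offset and length, trim the skb down
 * to the fragment payload, link it into the offset-ordered fragment
 * list, and start reassembly once the first and last fragments and all
 * the bytes in between have arrived.  Any overlap causes the whole
 * queue to be discarded, as required by RFC 5722.
 */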
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			   struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

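	/* offset is the fragment offset in bytes (the header field counts
	 * 8-byte units); end is one past the last byte this fragment
	 * carries, computed from the IPv6 payload length minus whatever
	 * extension headers precede the fragment payload.
	 */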
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with the fq spinlock held, and the caller must have
 *	checked that the queue is eligible for reassembly, i.e. it is not
 *	COMPLETE, the last and the first fragments have arrived and all
 *	the bytes in between are present.
 *
 *	Returns 1 on success, with IP6CB(head)->nhoff identifying the
 *	current nexthdr field in the reassembled frame, and -1 on failure
 *	for any reason.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;
	int sum_truesize;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split it into
	 * two chunks: the first with the data and the paged part, and the
	 * second holding only the fragment list. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the headers that precede it in order to calculate the
	 * ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

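	/* Walk the remaining fragments and fold them into the head skb,
	 * either by coalescing their data directly or by chaining them on
	 * the frag_list, keeping checksum and truesize accounting
	 * consistent along the way.
	 */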
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	atomic_sub(sum_truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

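/* Receive handler for the IPv6 Fragment extension header
 * (IPPROTO_FRAGMENT).  Returns 1 when processing should continue on
 * this skb (either the datagram was fully reassembled or it was not
 * actually fragmented) and -1 when the skb has been queued or dropped.
 */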
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

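	/* htons(0xFFF9) covers the fragment offset bits plus the M (more
	 * fragments) flag; if all of them are zero this is an atomic
	 * fragment that needs no reassembly.
	 */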
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

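/* Hooked into the IPv6 extension-header demux for IPPROTO_FRAGMENT.
 * INET6_PROTO_NOPOLICY makes the input path skip the XFRM policy check
 * before invoking the handler.
 */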
static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
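/* Per-namespace tunables (high/low thresholds and the reassembly
 * timeout) live in ip6_frags_ns_ctl_table; the hash secret rekeying
 * interval in ip6_frags_ctl_table is global and registered once for
 * init_net.
 */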
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
			ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

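/* Module init: register the fragment-header protocol handler, the
 * sysctls, and the per-namespace state, then hand our callbacks and
 * parameters to the shared inet_frag core.
 */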
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}