ipv6: stop sending PTB packets for MTU < 1280
net/ipv6/reassembly.c
/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen      Make it work with multiple hosts.
 *                      More RFC compliance.
 *
 *      Horst von Brand Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

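/* Per-fragment state kept in skb->cb: the byte offset of this fragment's
 * payload within the datagram being reassembled.
 */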
struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        u32                     user;
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
        return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                             const struct in6_addr *daddr, u32 rnd)
{
        u32 c;

        c = jhash_3words((__force u32)saddr->s6_addr32[0],
                         (__force u32)saddr->s6_addr32[1],
                         (__force u32)saddr->s6_addr32[2],
                         rnd);

        c = jhash_3words((__force u32)saddr->s6_addr32[3],
                         (__force u32)daddr->s6_addr32[0],
                         (__force u32)daddr->s6_addr32[1],
                         c);

        c =  jhash_3words((__force u32)daddr->s6_addr32[2],
                          (__force u32)daddr->s6_addr32[3],
                          (__force u32)id,
                          c);

        return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id && fq->user == arg->user &&
                        ipv6_addr_equal(&fq->saddr, arg->src) &&
                        ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        fq->user = arg->user;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill the fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

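/* Evict old fragment queues once reassembly memory exceeds the high
 * threshold; every queue dropped this way is counted as a reassembly failure.
 */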
static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

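/* Reassembly timer expired: kill the queue, account the timeout and, if the
 * first fragment has arrived, send ICMPV6_TIME_EXCEED / ICMPV6_EXC_FRAGTIME
 * back to the sender via the device the last fragment came in on.
 */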
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        fq_kill(fq);

        net = container_of(fq->q.net, struct net, ipv6.frags);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
                goto out_rcu_unlock;

        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out_rcu_unlock;

        /*
           But use as the source the device on which the LAST ARRIVED
           segment was received. And do not use the fq->dev pointer
           directly; the device might have disappeared already.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
        rcu_read_unlock();
out:
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

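/* Find the reassembly queue matching (id, saddr, daddr) for local delivery,
 * creating it if necessary. Returns NULL on allocation failure or hash
 * overflow.
 */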
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.user = IP6_DEFRAG_LOCAL_DELIVER;
        arg.src = src;
        arg.dst = dst;

        read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
                inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
        }
        return container_of(q, struct frag_queue, q);
}

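/* Add one fragment to its reassembly queue: validate the offset and length,
 * strip the fragment header, insert the skb into the offset-ordered fragment
 * list (dropping the whole queue on any overlap, per RFC5722), and attempt
 * final reassembly once the first and last fragments and all bytes are here.
 */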
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;
        struct net *net = dev_net(skb_dst(skb)->dev);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = fq->q.fragments_tail;
        if (!prev || FRAG6_CB(prev)->offset < offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for(next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* RFC5722, Section 4:
         *   When reassembling an IPv6 datagram, if one or more of its
         *   constituent fragments is determined to be an overlapping
         *   fragment, the entire datagram (and any constituent fragments,
         *   including those not yet received) MUST be silently discarded.
         */

        /* Check for overlap with preceding fragment. */
        if (prev &&
            (FRAG6_CB(prev)->offset + prev->len) > offset)
                goto discard_fq;

        /* Look for overlap with succeeding segment. */
        if (next && FRAG6_CB(next)->offset < end)
                goto discard_fq;

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &fq->q.net->mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                res = ip6_frag_reasm(fq, prev, dev);
                skb->_skb_refdst = orefdst;
                return res;
        }

        skb_dst_drop(skb);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

discard_fq:
        fq_kill(fq);
err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *      Assemble the fragments into the complete packet.
 *      Returns -1 on any failure and 1 when the datagram has been
 *      successfully reassembled into the head skb.
 *
 *      It is called with the fq lock held, and the caller must check that
 *      the queue is eligible for reassembly, i.e. it is not COMPLETE,
 *      the last and the first fragments arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                if (!fp->next)
                        fq->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG6_CB(head)->offset != 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with the data and paged part
         * and the second holding only the fragment list. */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &fq->q.net->mem);
        }

        /* We have to remove the fragment header from the datagram and
         * relocate the remaining headers in order to calculate the ICV
         * correctly. */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));

        for (fp=head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
        atomic_sub(head->truesize, &fq->q.net->mem);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;
        IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

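/* Receive handler for the IPv6 fragment header (IPPROTO_FRAGMENT). Atomic
 * (non-fragmented) frames carrying a fragment header are passed through;
 * real fragments are queued for reassembly.
 */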
static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);

        if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
                goto fail_hdr;

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len==0)
                goto fail_hdr;

        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr))))
                goto fail_hdr;

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
                return 1;
        }

        if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
                ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;

fail_hdr:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
}

static const struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
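/* Per-namespace knobs under net.ipv6: ip6frag_high_thresh,
 * ip6frag_low_thresh and ip6frag_time.
 */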
static struct ctl_table ip6_frags_ns_ctl_table[] = {
        {
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ip6frag_time",
                .data           = &init_net.ipv6.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .procname       = "ip6frag_secret_interval",
                .data           = &ip6_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip6_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
                table[2].data = &net->ipv6.frags.timeout;
        }

        hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv6.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
        ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
                        ip6_frags_ctl_table);
        return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
        return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

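/* Per-namespace setup: seed the thresholds and timeout with the compile-time
 * defaults and register the namespace's sysctl table.
 */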
static int __net_init ipv6_frags_init_net(struct net *net)
{
        net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

        inet_frags_init_net(&net->ipv6.frags);

        return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
        ip6_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
        .init = ipv6_frags_init_net,
        .exit = ipv6_frags_exit_net,
};

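/* Module init: register the fragment protocol handler and sysctls, set up
 * the per-namespace state, and hand our callbacks to the inet_frag core.
 */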
int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        ret = ip6_frags_sysctl_register();
        if (ret)
                goto err_sysctl;

        ret = register_pernet_subsys(&ip6_frags_ops);
        if (ret)
                goto err_pernet;

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        ip6_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip6_frags);
out:
        return ret;

err_pernet:
        ip6_frags_sysctl_unregister();
err_sysctl:
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        goto out;
}

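/* Module exit: tear down everything set up by ipv6_frag_init(). */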
void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        ip6_frags_sysctl_unregister();
        unregister_pernet_subsys(&ip6_frags_ops);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}