/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
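
/* Worked example: a datagram arrives in two fragments, one marked
 * ECT(0) and one marked CE.  Reassembly ORs the per-fragment ECN
 * flags together, so the table index is
 * IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 and the lookup yields INET_ECN_CE:
 * the congestion mark survives into the reassembled header.  Any mix
 * that includes Not-ECT together with an ECN-capable codepoint is
 * invalid per RFC 3168 5.3, maps to 0xff, and the frame is dropped.
 */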

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}
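
/* The rebuild above is a defense against hash-collision attacks: an
 * attacker who can predict bucket placement could flood one chain
 * with colliding fragment queues.  Re-seeding f->rnd invalidates all
 * previously computed bucket indexes, so every queue is recomputed
 * and relinked while the writer lock keeps lookups out.
 */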

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}
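
/* In other words, a queue becomes evictable once its namespace is at
 * or above the low memory watermark (low_thresh == 0 means the
 * namespace is going away, so everything is evictable).  For example,
 * assuming the common 4 MB high / 3 MB low defaults, eviction keeps
 * reclaiming queues until accounted fragment memory drops back below
 * 3 MB.
 */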

static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

evict_again:
	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer)) {
			/* q expiring right now thus increment its refcount so
			 * it won't be freed under us and wait until the timer
			 * has finished executing then destroy it
			 */
			atomic_inc(&fq->refcnt);
			spin_unlock(&hb->chain_lock);
			del_timer_sync(&fq->timer);
			WARN_ON(atomic_read(&fq->refcnt) != 1);
			inet_frag_put(fq, f);
			goto evict_again;
		}

		/* suppress xmit of (icmp) error packet */
		fq->last_in &= ~INET_FRAG_FIRST_IN;
		fq->last_in |= INET_FRAG_EVICTED;
		hlist_del(&fq->list);
		hlist_add_head(&fq->list, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list)
		f->frag_expire((unsigned long) fq);

	return evicted;
}
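
/* Note the two-phase shape of inet_evict_bucket(): victims are first
 * moved to the private "expired" list under chain_lock, and their
 * expire callbacks only run after the lock is dropped.  This keeps
 * f->frag_expire(), which takes the queue's own lock, from ever
 * executing under the bucket lock.
 */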

static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	read_lock_bh(&f->lock);

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	read_unlock_bh(&f->lock);
}
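
/* Eviction is deliberately budgeted: one worker invocation scans at
 * most INETFRAGS_EVICT_BUCKETS buckets and bails out early once
 * INETFRAGS_EVICT_MAX queues have been evicted, saving its position
 * in f->next_bucket.  Successive runs therefore sweep the hash table
 * round-robin instead of freeing everything in one long stall.
 */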

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
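
/* Usage sketch (illustrative only, loosely modelled on the ipv4
 * caller; the ip4_* names are defined in net/ipv4/ip_fragment.c, not
 * here):
 *
 *	void __init ipfrag_init(void)
 *	{
 *		ip4_frags.hashfn = ip4_hashfn;
 *		ip4_frags.constructor = ip4_frag_init;
 *		ip4_frags.qsize = sizeof(struct ipq);
 *		ip4_frags.match = ip4_frag_match;
 *		ip4_frags.frag_expire = ip_expire;
 *		ip4_frags.secret_interval = 10 * 60 * HZ;
 *		inet_frags_init(&ip4_frags);
 *	}
 *
 * Each protocol (ipv4, ipv6, ipv6 nf conntrack) fills in its own
 * hash, match, constructor and expire callbacks and registers here.
 */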

void inet_frags_init_net(struct netns_frags *nf)
{
	init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
	cancel_work_sync(&f->frags_work);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	int i;

	nf->low_thresh = 0;

	read_lock_bh(&f->lock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	read_unlock_bh(&f->lock);

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);
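
/* Refcount scheme recap: a live queue holds one reference for the
 * hash table and one for its pending timer, plus one per active
 * user.  inet_frag_kill() drops the timer and hash-table references,
 * so when the last user calls inet_frag_put() the count reaches zero
 * and inet_frag_destroy() finally runs.
 */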

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);
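
/* Accounting note: the amount returned to the per-netns counter is
 * the sum of each fragment's skb->truesize plus f->qsize, mirroring
 * what inet_frag_alloc() and the protocol enqueue paths charged, so
 * frag_mem_limit() stays balanced over a queue's lifetime.
 */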

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed w/o the lock other CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = inet_frag_hashfn(f, qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such an entry could have been created on another cpu while
	 * we released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}
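
/* The alloc-then-recheck pattern above trades an occasional wasted
 * allocation for shorter lock hold times: the new queue is built
 * outside any lock, and only the final insertion re-scans the chain.
 * If another CPU won the race, the freshly built qp_in is marked
 * COMPLETE and released, and the winner's queue is returned with its
 * refcount already taken for the caller.
 */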

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}
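
/* Allocation uses GFP_ATOMIC because fragments arrive in softirq
 * context.  Note the hard admission gate: past high_thresh no new
 * queue is created at all; the eviction worker is kicked and the
 * caller ends up dropping the incoming fragment.
 */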

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
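
/* Caller sketch (hypothetical, loosely modelled on the ipv4 lookup
 * path of this era; names are for exposition).  Note that the caller
 * takes f->lock before hashing, since the hash depends on f->rnd, and
 * inet_frag_find() releases it:
 *
 *	static struct ipq *ip_find(struct net *net, struct iphdr *iph,
 *				   u32 user)
 *	{
 *		struct inet_frag_queue *q;
 *		struct ip4_create_arg arg = { .iph = iph, .user = user };
 *		unsigned int hash;
 *
 *		read_lock(&ip4_frags.lock);
 *		hash = ipqhashfn(iph->id, iph->saddr, iph->daddr,
 *				 iph->protocol);
 *
 *		q = inet_frag_find(&net->ipv4.frags, &ip4_frags,
 *				   &arg, hash);
 *		if (IS_ERR_OR_NULL(q)) {
 *			inet_frag_maybe_warn_overflow(q, pr_fmt());
 *			return NULL;
 *		}
 *		return container_of(q, struct ipq, q);
 *	}
 */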

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);