/*
 *	INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  The nodes contain long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that happens, a node may be removed after a sufficient amount of
 *  time has passed since its last use.  The least-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND their reference count being 0.
 *  3.  The per-base total counter (base->total) is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent the node from disappearing
 */

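/*
 * Illustrative sketch (editorial, not part of the original file): the usage
 * pattern the rules above imply for a caller.  A successful lookup returns a
 * node with its reference count already taken, and inet_putpeer() is what
 * later lets the entry age out.  The wrapper below mirrors what the
 * inet_getpeer_v4() convenience helper in net/inetpeer.h does; the function
 * name here is made up for the example.
 */
static inline struct inet_peer *example_getpeer_v4(__be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	/* takes a reference; the node cannot be freed while we hold it */
	return inet_getpeer(&daddr, create);
}
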
static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	int			total;
};

static struct inet_peer_base v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};

static struct inet_peer_base v6_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
	.total		= 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;		/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

static void inetpeer_gc_worker(struct work_struct *work)
{
	struct inet_peer *p, *n;
	LIST_HEAD(list);

	spin_lock_bh(&gc_lock);
	list_replace_init(&gc_list, &list);
	spin_unlock_bh(&gc_lock);

	if (list_empty(&list))
		return;

	list_for_each_entry_safe(p, n, &list, gc_list) {

		if (p->avl_left != peer_avl_empty) {
			list_add_tail(&p->avl_left->gc_list, &list);
			p->avl_left = peer_avl_empty;
		}

		if (p->avl_right != peer_avl_empty) {
			list_add_tail(&p->avl_right->gc_list, &list);
			p->avl_right = peer_avl_empty;
		}

		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

		if (!atomic_read(&p->refcnt)) {
			list_del(&p->gc_list);
			kmem_cache_free(peer_cachep, p);
		}
	}

	if (list_empty(&list))
		return;

	spin_lock_bh(&gc_lock);
	list_splice(&list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

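/*
 * Editorial note: the worker above is fed by inetpeer_invalidate_tree(),
 * which detaches a whole AVL tree in one step and hands its root to
 * inetpeer_inval_rcu() once an RCU grace period has elapsed.  The worker
 * then flattens the detached tree onto its local list, frees every node
 * whose reference count has already dropped to zero, and reschedules itself
 * for whatever is still referenced.
 */
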
/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}

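/*
 * Editorial note (worked out from the defaults above): the shifts are
 * cumulative, so the effective threshold becomes
 *
 *	 > 32MB RAM:	65664 entries
 *	<= 32MB RAM:	65664 >> 1 = 32832 entries
 *	<= 16MB RAM:	65664 >> 2 = 16416 entries
 *	<=  8MB RAM:	65664 >> 4 =  4104 entries
 *
 * The "1MB"/"512KB"/"128KB" figures in the comments describe the resulting
 * worst-case pool size on the 32-bit systems these numbers were tuned for.
 */
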
static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if (a->addr.a6[i] < b->addr.a6[i])
			return -1;
		return 1;
	}
	return 0;
}

#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})

/*
 * Called with rcu_read_lock().
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted (refcnt=-1)
			 */
			if (!atomic_add_unless(&u->refcnt, 1, -1))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

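/*
 * Editorial sketch (not part of the original file): the refcnt convention
 * used above.  A live but currently unused node sits at refcnt 0; the
 * garbage collector claims a node by moving refcnt from 0 to -1 with
 * cmpxchg, so a lockless reader must take its reference with
 * atomic_add_unless() and treat -1 as "already being destroyed".
 */
static inline bool example_try_hold_peer(struct inet_peer *p)
{
	/* increment refcnt unless it is -1 (node queued for destruction) */
	return atomic_add_unless(&p->refcnt, 1, -1) != 0;
}
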
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1;	/* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1;	/* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1;	/* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1;	/* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else { /* heights differ by at most 1; just fix the height */
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer __rcu ***stackptr, ***delp;

	if (lookup(&p->daddr, stack, base) != p)
		BUG();
	delp = stackptr - 1; /* *delp[0] == p */
	if (p->avl_left == peer_avl_empty_rcu) {
		*delp[0] = p->avl_right;
		--stackptr;
	} else {
		/* look for a node to insert instead of p */
		struct inet_peer *t;
		t = lookup_rightempty(p, base);
		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
		**--stackptr = t->avl_left;
		/* t is removed, t->daddr > x->daddr for any
		 * x in p->avl_left subtree.
		 * Put t in the old place of p. */
		RCU_INIT_POINTER(*delp[0], t);
		t->avl_left = p->avl_left;
		t->avl_right = p->avl_right;
		t->avl_height = p->avl_height;
		BUG_ON(delp[1] != &p->avl_left);
		delp[1] = &t->avl_left; /* was &p->avl_left */
	}
	peer_avl_rebalance(stack, stackptr, base);
	base->total--;
	call_rcu(&p->rcu, inetpeer_free_rcu);
}

static struct inet_peer_base *family_to_base(int family)
{
	return family == AF_INET ? &v4_peers : &v6_peers;
}

/* perform garbage collection on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
			struct inet_peer __rcu ***stackptr)
{
	struct inet_peer *p, *gchead = NULL;
	__u32 delta, ttl;
	int cnt = 0;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	stackptr--; /* last stack slot is peer_avl_empty */
	while (stackptr > stack) {
		stackptr--;
		p = rcu_deref_locked(**stackptr, base);
		if (atomic_read(&p->refcnt) == 0) {
			smp_rmb();
			delta = (__u32)jiffies - p->dtime;
			if (delta >= ttl &&
			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
				p->gc_next = gchead;
				gchead = p;
			}
		}
	}
	while ((p = gchead) != NULL) {
		gchead = p->gc_next;
		cnt++;
		unlink_from_pool(p, base, stack);
	}
	return cnt;
}

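/*
 * Editorial note: the ttl computation above linearly scales the idle TTL
 * from inet_peer_maxttl for an empty pool down towards inet_peer_minttl as
 * base->total approaches inet_peer_threshold (a pool at or over the
 * threshold gets ttl 0 outright).  Dividing by HZ before multiplying by
 * base->total keeps the intermediate product small enough for 32-bit
 * arithmetic.  With the defaults (maxttl = 600*HZ, minttl = 120*HZ,
 * threshold = 65664) and a half-full pool (base->total = 32832):
 *
 *	ttl = 600*HZ - (600 - 120) * 32832 / 65664 * HZ
 *	    = 600*HZ - 240*HZ
 *	    = 360*HZ			(i.e. six minutes)
 */
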
struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(daddr->family);
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated, gccnt = 0;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock first.
	 * At least, nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
relookup:
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		return p;
	}
	if (!gccnt) {
		gccnt = inet_peer_gc(base, stack, stackptr);
		if (gccnt && create)
			goto relookup;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		p->rate_last = 0;
		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
		INIT_LIST_HEAD(&p->gc_list);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

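/*
 * Editorial note: inet_getpeer() therefore runs in up to three phases: a
 * lockless RCU lookup validated against the base seqlock, a locked
 * re-lookup when a writer interfered or the entry must be created, and, if
 * the slot is still empty, a single round of garbage collection over the
 * nodes stacked by the lookup before a fresh entry is allocated and linked
 * into the tree.
 */
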
void inet_putpeer(struct inet_peer *p)
{
	p->dtime = (__u32)jiffies;
	smp_mb__before_atomic_dec();
	atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too.  It uses a Token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

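/*
 * Illustrative sketch (editorial, not part of the original file): how the
 * token bucket above behaves.  Tokens accrue at one per jiffy of idle time
 * and are capped at XRLIM_BURST_FACTOR * timeout, so with timeout = HZ a
 * previously idle peer may send a burst of 6 messages and is then limited
 * to one per second.  The caller below is made up for the example; the real
 * callers live in the ICMP code.
 */
static inline bool example_icmp_may_send(struct inet_peer *peer)
{
	/* allow a burst of 6 messages, then one per second */
	return inet_peer_xrlim_allow(peer, HZ);
}
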
static void inetpeer_inval_rcu(struct rcu_head *head)
{
	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

	spin_lock_bh(&gc_lock);
	list_add_tail(&p->gc_list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

void inetpeer_invalidate_tree(int family)
{
	struct inet_peer *old, *new, *prev;
	struct inet_peer_base *base = family_to_base(family);

	write_seqlock_bh(&base->lock);

	old = base->root;
	if (old == peer_avl_empty_rcu)
		goto out;

	new = peer_avl_empty_rcu;

	prev = cmpxchg(&base->root, old, new);
	if (prev == old) {
		base->total = 0;
		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
	}

out:
	write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);