net/ipv4/inetpeer.c
/*
 *              INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:    Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *  At this moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the IP packet identifier is made
 *  unpredictable by this code only for packets that are (actually or
 *  potentially) subject to defragmentation.  I.e. DF packets smaller than
 *  the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the avl tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-living nodes in a single hash slot would significantly delay
 *  lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *              avl_left, avl_right, avl_parent, avl_height: pool lock
 *              refcnt: atomically against modifications on other CPUs;
 *                 usually under some other lock to prevent node disappearing
 *              daddr: unchangeable
 *              ip_id_count: atomic value (no lock needed)
 */
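
/* For reference, a sketch of the ip_id_count consumer described above.
 * This mirrors inet_getid() in include/net/inetpeer.h at the time of
 * writing (assumption: the exact body may differ between trees):
 *
 *      static inline __u16 inet_getid(struct inet_peer *p, int more)
 *      {
 *              more++;
 *              return atomic_add_return(more, &p->ip_id_count) - more;
 *      }
 *
 * Each call atomically reserves "more + 1" IDs and returns the first one,
 * so concurrent callers never hand out the same identifier.
 */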

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
        .avl_left       = peer_avl_empty_rcu,
        .avl_right      = peer_avl_empty_rcu,
        .avl_height     = 0
};

struct inet_peer_base {
        struct inet_peer __rcu *root;
        seqlock_t       lock;
        int             total;
};

static struct inet_peer_base v4_peers = {
        .root           = peer_avl_empty_rcu,
        .lock           = __SEQLOCK_UNLOCKED(v4_peers.lock),
        .total          = 0,
};

static struct inet_peer_base v6_peers = {
        .root           = peer_avl_empty_rcu,
        .lock           = __SEQLOCK_UNLOCKED(v6_peers.lock),
        .total          = 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;    /* start to throw entries more
                                                         * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;  /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min */


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
        struct sysinfo si;

        /* Use the straight interface to information about memory. */
        si_meminfo(&si);
        /* The values below were suggested by Alexey Kuznetsov
         * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
         * myself.  --SAW
         */
        if (si.totalram <= (32768*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
        if (si.totalram <= (16384*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* about 512KB */
        if (si.totalram <= (8192*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 2; /* about 128KB */

        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);
}
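
/* Sizing example for the shifts above: the default threshold is
 * 65536 + 128 = 65664 entries.  The shifts compound, so a machine with
 * <= 32MB of RAM ends up with 32832 entries, <= 16MB with 16416, and
 * <= 8MB with 4104 (65664 >> 1 >> 1 >> 2), keeping the worst-case pool
 * size roughly proportional to available memory.
 */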

static int addr_compare(const struct inetpeer_addr *a,
                        const struct inetpeer_addr *b)
{
        int i, n = (a->family == AF_INET ? 1 : 4);

        for (i = 0; i < n; i++) {
                if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
                if (a->addr.a6[i] < b->addr.a6[i])
                        return -1;
                return 1;
        }

        return 0;
}
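
/* Note on the comparison above: struct inetpeer_addr overlays addr.a4 on
 * addr.a6[0] in a union, so for AF_INET comparing the single 32-bit word
 * a6[0] suffices.  A hypothetical caller building a v4 key might look
 * like:
 *
 *      struct inetpeer_addr key = {
 *              .family  = AF_INET,
 *              .addr.a4 = ip_hdr(skb)->daddr,
 *      };
 *      peer = inet_getpeer(&key, 1);
 */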

#define rcu_deref_locked(X, BASE)                               \
        rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)                           \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &_base->root;                             \
        for (u = rcu_deref_locked(_base->root, _base);          \
             u != peer_avl_empty; ) {                           \
                int cmp = addr_compare(_daddr, &u->daddr);      \
                if (cmp == 0)                                   \
                        break;                                  \
                if (cmp == -1)                                  \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
                u = rcu_deref_locked(*v, _base);                \
        }                                                       \
        u;                                                      \
})
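
/* Note: lookup() deliberately reads and updates "stackptr" from the
 * caller's scope (it is not a macro parameter).  On return, the caller's
 * stack[] holds a pointer to every link followed from the root, and
 * stackptr points one past the last slot used; peer_avl_rebalance() and
 * unlink_from_pool() rely on exactly this layout.
 */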

/*
 * Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links followed exceeds
 * PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
                                    struct inet_peer_base *base)
{
        struct inet_peer *u = rcu_dereference(base->root);
        int count = 0;

        while (u != peer_avl_empty) {
                int cmp = addr_compare(daddr, &u->daddr);
                if (cmp == 0) {
                        /* Before taking a reference, check if this entry was
                         * deleted (refcnt=-1)
                         */
                        if (!atomic_add_unless(&u->refcnt, 1, -1))
                                u = NULL;
                        return u;
                }
                if (cmp == -1)
                        u = rcu_dereference(u->avl_left);
                else
                        u = rcu_dereference(u->avl_right);
                if (unlikely(++count == PEER_MAXDEPTH))
                        break;
        }
        return NULL;
}
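
/* Reference-count protocol used above: refcnt == -1 marks a node that
 * inet_peer_gc() has claimed for unlinking (via atomic_cmpxchg(&refcnt,
 * 0, -1)).  atomic_add_unless(&refcnt, 1, -1) increments and returns
 * nonzero only when refcnt != -1, so a lockless reader can never
 * resurrect a node the garbage collector already owns.
 */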

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)                          \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
        for (u = rcu_deref_locked(*v, base);                    \
             u->avl_right != peer_avl_empty_rcu; ) {            \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
                u = rcu_deref_locked(*v, base);                 \
        }                                                       \
        u;                                                      \
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                               struct inet_peer __rcu ***stackend,
                               struct inet_peer_base *base)
{
        struct inet_peer __rcu **nodep;
        struct inet_peer *node, *l, *r;
        int lh, rh;

        while (stackend > stack) {
                nodep = *--stackend;
                node = rcu_deref_locked(*nodep, base);
                l = rcu_deref_locked(node->avl_left, base);
                r = rcu_deref_locked(node->avl_right, base);
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
                        ll = rcu_deref_locked(l->avl_left, base);
                        lr = rcu_deref_locked(l->avl_right, base);
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {   /* ll: RH+1 */
                                RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = lrh + 1; /* RH+1 or RH+2 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH+1 */
                                RCU_INIT_POINTER(l->avl_right, node);   /* node: RH+1 or RH+2 */
                                l->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
                                lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
                                lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1; /* node: RH+1 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH */
                                RCU_INIT_POINTER(l->avl_right, lrl);    /* lrl: RH or RH-1 */
                                l->avl_height = rh + 1; /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_left, l);      /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_right, node);  /* node: RH+1 */
                                lr->avl_height = rh + 2;
                                RCU_INIT_POINTER(*nodep, lr);
                        }
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
                        rr = rcu_deref_locked(r->avl_right, base);
                        rl = rcu_deref_locked(r->avl_left, base);
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {   /* rr: LH+1 */
                                RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = rlh + 1; /* LH+1 or LH+2 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH+1 */
                                RCU_INIT_POINTER(r->avl_left, node);    /* node: LH+1 or LH+2 */
                                r->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: LH, rl: LH+1 */
                                rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
                                rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1; /* node: LH+1 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH */
                                RCU_INIT_POINTER(r->avl_left, rlr);     /* rlr: LH or LH-1 */
                                r->avl_height = lh + 1; /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_right, r);     /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_left, node);   /* node: LH+1 */
                                rl->avl_height = lh + 2;
                                RCU_INIT_POINTER(*nodep, rl);
                        }
                } else {
                        node->avl_height = (lh > rh ? lh : rh) + 1;
                }
        }
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)                                   \
do {                                                            \
        n->avl_height = 1;                                      \
        n->avl_left = peer_avl_empty_rcu;                       \
        n->avl_right = peer_avl_empty_rcu;                      \
        /* lockless readers can catch us now */                 \
        rcu_assign_pointer(**--stackptr, n);                    \
        peer_avl_rebalance(stack, stackptr, base);              \
} while (0)
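
/* The rcu_assign_pointer() above carries the write barrier that makes the
 * new node's fields (height, children, daddr) visible before the node
 * itself becomes reachable, which is what lets lookup_rcu() walk the tree
 * without taking the pool lock.
 */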

static void inetpeer_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
                             struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
        struct inet_peer __rcu ***stackptr, ***delp;

        if (lookup(&p->daddr, stack, base) != p)
                BUG();
        delp = stackptr - 1; /* *delp[0] == p */
        if (p->avl_left == peer_avl_empty_rcu) {
                *delp[0] = p->avl_right;
                --stackptr;
        } else {
                /* look for a node to insert instead of p */
                struct inet_peer *t;
                t = lookup_rightempty(p, base);
                BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
                **--stackptr = t->avl_left;
                /* t is removed, t->daddr > x->daddr for any
                 * x in p->avl_left subtree.
                 * Put t in the old place of p. */
                RCU_INIT_POINTER(*delp[0], t);
                t->avl_left = p->avl_left;
                t->avl_right = p->avl_right;
                t->avl_height = p->avl_height;
                BUG_ON(delp[1] != &p->avl_left);
                delp[1] = &t->avl_left; /* was &p->avl_left */
        }
        peer_avl_rebalance(stack, stackptr, base);
        base->total--;
        call_rcu(&p->rcu, inetpeer_free_rcu);
}
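
/* Freeing through call_rcu() rather than directly means the node's memory
 * stays valid until every pre-existing rcu_read_lock() section has ended,
 * so a concurrent lookup_rcu() passing through the just-unlinked node can
 * still dereference its child pointers safely; at worst the lockless
 * lookup misses and inet_getpeer() falls back to the locked path.
 */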

static struct inet_peer_base *family_to_base(int family)
{
        return family == AF_INET ? &v4_peers : &v6_peers;
}

/* perform garbage collection on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
                        struct inet_peer __rcu **stack[PEER_MAXDEPTH],
                        struct inet_peer __rcu ***stackptr)
{
        struct inet_peer *p, *gchead = NULL;
        __u32 delta, ttl;
        int cnt = 0;

        if (base->total >= inet_peer_threshold)
                ttl = 0; /* be aggressive */
        else
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        base->total / inet_peer_threshold * HZ;
        stackptr--; /* last stack slot is peer_avl_empty */
        while (stackptr > stack) {
                stackptr--;
                p = rcu_deref_locked(**stackptr, base);
                if (atomic_read(&p->refcnt) == 0) {
                        smp_rmb();
                        delta = (__u32)jiffies - p->dtime;
                        if (delta >= ttl &&
                            atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
                                p->gc_next = gchead;
                                gchead = p;
                        }
                }
        }
        while ((p = gchead) != NULL) {
                gchead = p->gc_next;
                cnt++;
                unlink_from_pool(p, base, stack);
        }
        return cnt;
}
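
/* Worked example of the ttl interpolation above, using the defaults
 * (maxttl = 600s, minttl = 120s, threshold = 65664): the allowed idle
 * time shrinks linearly from maxttl for an empty pool towards minttl as
 * the pool fills.  At 50% occupancy, ttl = 600s - (600s - 120s) * 0.5 =
 * 360s; at or beyond the threshold the "be aggressive" branch drops ttl
 * to 0 and any unreferenced entry on the stack becomes reclaimable.
 */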

struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
{
        struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
        struct inet_peer_base *base = family_to_base(daddr->family);
        struct inet_peer *p;
        unsigned int sequence;
        int invalidated, gccnt = 0;

        /* Attempt a lockless lookup first.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock();
        sequence = read_seqbegin(&base->lock);
        p = lookup_rcu(daddr, base);
        invalidated = read_seqretry(&base->lock, sequence);
        rcu_read_unlock();

        if (p)
                return p;

        /* If no writer did a change during our lookup, we can return early. */
        if (!create && !invalidated)
                return NULL;

        /* Retry an exact lookup, this time taking the lock first.
         * At least, nodes should be hot in our cache.
         */
        write_seqlock_bh(&base->lock);
relookup:
        p = lookup(daddr, stack, base);
        if (p != peer_avl_empty) {
                atomic_inc(&p->refcnt);
                write_sequnlock_bh(&base->lock);
                return p;
        }
        if (!gccnt) {
                gccnt = inet_peer_gc(base, stack, stackptr);
                if (gccnt && create)
                        goto relookup;
        }
        p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
        if (p) {
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
                atomic_set(&p->ip_id_count,
                                (daddr->family == AF_INET) ?
                                        secure_ip_id(daddr->addr.a4) :
                                        secure_ipv6_id(daddr->addr.a6));
                p->tcp_ts_stamp = 0;
                p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                p->rate_tokens = 0;
                p->rate_last = 0;
                p->pmtu_expires = 0;
                p->pmtu_orig = 0;
                memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));

                /* Link the node. */
                link_to_pool(p, base);
                base->total++;
        }
        write_sequnlock_bh(&base->lock);

        return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
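
/* Hypothetical caller sketch (not from this file): take or create the
 * peer for an IPv4 destination, consume one generated IP id, and drop
 * the reference so the entry becomes eligible for garbage collection:
 *
 *      struct inetpeer_addr key = { .family = AF_INET, .addr.a4 = daddr };
 *      struct inet_peer *peer = inet_getpeer(&key, 1);
 *
 *      if (peer) {
 *              __u16 id = inet_getid(peer, 0);
 *              inet_putpeer(peer);
 *      }
 */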

void inet_putpeer(struct inet_peer *p)
{
        p->dtime = (__u32)jiffies;
        /* Pairs with the smp_rmb() in inet_peer_gc(): the dtime store must
         * be visible before refcnt can be observed as 0.
         */
        smp_mb__before_atomic_dec();
        atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *      Check transmit rate limitation for given message.
 *      The rate information is held in the inet_peer entries now.
 *      This function is generic and could be used for other purposes
 *      too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *      Note that the same inet_peer fields are modified by functions in
 *      route.c too, but these work for packet destinations while xrlim_allow
 *      works for ICMP destinations. This means the rate limiting information
 *      for one "ip object" is shared - and these ICMPs are twice limited:
 *      by source and by destination.
 *
 *      RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *                        SHOULD allow setting of rate limits
 *
 *      Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
        unsigned long now, token;
        bool rc = false;

        if (!peer)
                return true;

        token = peer->rate_tokens;
        now = jiffies;
        token += now - peer->rate_last;
        peer->rate_last = now;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;
        if (token >= timeout) {
                token -= timeout;
                rc = true;
        }
        peer->rate_tokens = token;
        return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
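
/* Token bucket arithmetic, by way of example: tokens accrue at one per
 * jiffy and each permitted message costs "timeout" tokens, so the
 * sustained rate is one message per timeout interval.  The bucket is
 * capped at XRLIM_BURST_FACTOR * timeout, which lets a long-idle peer
 * emit a burst of up to 6 messages back to back before settling to the
 * sustained rate.
 */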