/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in ip_vs_dest_set_max().
 *
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
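
/*
 * For illustration (a hypothetical trace): the first packet for a given
 * destination IP creates a one-node set holding the weighted
 * least-connection server.  If that node later exceeds its weight in
 * active connections while some other node sits below half of its own
 * weight, a second node is added to the set.  Once the set has more
 * than one member and has not been modified for longer than T, the
 * busiest member is dropped again.
 */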

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>


/*
 *    Garbage collection of stale IPVS lblcr entries, invoked when the
 *    table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
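
/*
 * In human units: the garbage-collection timer fires once a minute and,
 * when the table is over-full, reclaims entries that have been idle for
 * more than six minutes (60*HZ is one minute of wall time regardless of
 * the HZ setting).
 */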

/*
 *    Full expiration check: when no partial expiration check (garbage
 *    collection) has run in half an hour, do a full expiration check
 *    to collect stale entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;


/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
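
/*
 * Example: with the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10 the
 * table has 1 << 10 = 1024 buckets and IP_VS_LBLCR_TAB_MASK is 0x3ff,
 * so a hash key is reduced to a bucket index with a single AND.
 */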


/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
        struct ip_vs_dest_list  *next;          /* list link */
        struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct ip_vs_dest_list  *list;          /* destination list */
        rwlock_t                lock;           /* lock for this list */
};
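
/*
 * The set is a singly-linked list guarded by a reader/writer lock;
 * its size is kept in an atomic_t so that it can be read without
 * taking the lock (see ip_vs_lblcr_schedule below).
 */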


static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e;

        for (e = set->list; e != NULL; e = e->next) {
                if (e->dest == dest)
                        /* already exists */
                        return NULL;
        }

        e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
        if (e == NULL) {
                IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
                return NULL;
        }

        atomic_inc(&dest->refcnt);
        e->dest = dest;

        /* link it to the list */
        write_lock(&set->lock);
        e->next = set->list;
        set->list = e;
        atomic_inc(&set->size);
        write_unlock(&set->lock);

        set->lastmod = jiffies;
        return e;
}

static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e, **ep;

        write_lock(&set->lock);
        for (ep = &set->list, e = *ep; e != NULL; e = *ep) {
                if (e->dest == dest) {
                        /* HIT */
                        *ep = e->next;
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        kfree(e);
                        break;
                }
                ep = &e->next;
        }
        write_unlock(&set->lock);
}

static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_list *e, **ep;

        write_lock(&set->lock);
        for (ep = &set->list, e = *ep; e != NULL; e = *ep) {
                *ep = e->next;
                /*
                 * We don't kfree dest because it is referred to either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                kfree(e);
        }
        write_unlock(&set->lock);
}

/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        read_lock(&set->lock);
        /* select the first destination server whose weight > 0 */
        for (e = set->list; e != NULL; e = e->next) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        read_unlock(&set->lock);
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        for (e = e->next; e != NULL; e = e->next) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }
        read_unlock(&set->lock);

        IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);
        return least;
}


/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        read_lock(&set->lock);
        /* select the first destination server whose weight > 0 */
        for (e = set->list; e != NULL; e = e->next) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = atomic_read(&most->activeconns) * 50
                                + atomic_read(&most->inactconns);
                        goto nextstage;
                }
        }
        read_unlock(&set->lock);
        return NULL;

        /* find the destination with the greatest weighted load */
  nextstage:
        for (e = e->next; e != NULL; e = e->next) {
                dest = e->dest;
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }
        read_unlock(&set->lock);

        IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(most->addr), ntohs(most->port),
                  atomic_read(&most->activeconns),
                  atomic_read(&most->refcnt),
                  atomic_read(&most->weight), moh);
        return most;
}
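
/*
 * Note on the comparisons in ip_vs_dest_set_min/max above: weighted
 * loads are compared by cross-multiplication to avoid division (no
 * floats in kernel mode).  For example, with moh=200, mw=2, doh=150,
 * dw=1: moh/mw = 100 < doh/dw = 150, and indeed moh*dw = 200 <
 * doh*mw = 300.
 */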


/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        __be32                  addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
};


/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        rwlock_t                lock;           /* lock for this table */
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};


/*
 *      IPVS LBLCR sysctl table
 */

static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = &sysctl_ip_vs_lblcr_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header *sysctl_header;
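
/*
 * The expiration interval can be tuned at runtime; assuming the usual
 * IPVS sysctl location under net.ipv4.vs, that would be, e.g.:
 *
 *     echo 3600 > /proc/sys/net/ipv4/vs/lblcr_expiration
 *
 * proc_dointvec_jiffies translates between seconds in userspace and
 * jiffies in sysctl_ip_vs_lblcr_expiration.
 */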

/*
 *      new/free an ip_vs_lblcr_entry, which is a mapping of a
 *      destination IP address to a destination server set.
 */
static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
{
        struct ip_vs_lblcr_entry *en;

        en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
        if (en == NULL) {
                IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
                return NULL;
        }

        INIT_LIST_HEAD(&en->list);
        en->addr = daddr;

        /* initialize its dest set */
        atomic_set(&(en->set.size), 0);
        en->set.list = NULL;
        rwlock_init(&en->set.lock);

        return en;
}


static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}


/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
{
        return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
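
/*
 * 2654435761 is the golden-ratio multiplicative hash constant (a prime
 * close to 2^32 divided by the golden ratio); multiplying by it
 * scatters consecutive addresses across the table before masking.
 */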


/*
 *      Hash an entry in the ip_vs_lblcr_table.
 *      Returns bool success.
 */
static int
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned hash;

        if (!list_empty(&en->list)) {
                IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed "
                          "entry, called from %p\n",
                          __builtin_return_address(0));
                return 0;
        }

        /*
         *      Hash by destination IP address
         */
        hash = ip_vs_lblcr_hashkey(en->addr);

        write_lock(&tbl->lock);
        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
        write_unlock(&tbl->lock);

        return 1;
}


/*
 *  Get the ip_vs_lblcr_entry associated with the supplied address.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
{
        unsigned hash;
        struct ip_vs_lblcr_entry *en;

        hash = ip_vs_lblcr_hashkey(addr);

        read_lock(&tbl->lock);

        list_for_each_entry(en, &tbl->bucket[hash], list) {
                if (en->addr == addr) {
                        /* HIT */
                        read_unlock(&tbl->lock);
                        return en;
                }
        }

        read_unlock(&tbl->lock);

        return NULL;
}


/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
}


static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
{
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse + sysctl_ip_vs_lblcr_expiration,
                                       now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
        tbl->rover = j;
}


/*
 *      Periodic timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_lblcr_table *tbl;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        tbl = (struct ip_vs_lblcr_table *)data;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(tbl);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&tbl->lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&tbl->lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}
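
/*
 * Example of the goal computation above: with max_size = 16384 and
 * 20480 entries, goal = (20480 - 16384) * 4/3 = 5461, which is below
 * the max_size/2 = 8192 cap, so roughly a third more entries than the
 * current excess are reclaimed per run.
 */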

static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         *    Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n",
                  sizeof(struct ip_vs_lblcr_table));

        /*
         *    Initialize the hash buckets
         */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        rwlock_init(&tbl->lock);
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                        (unsigned long)tbl);
        tbl->periodic_timer.expires = jiffies + CHECK_EXPIRE_INTERVAL;
        add_timer(&tbl->periodic_timer);

        return 0;
}


static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(svc->sched_data);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(struct ip_vs_lblcr_table));

        return 0;
}


static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
{
        return 0;
}


static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is
         * fifty times higher than that of inactive connections on
         * average.  (This factor of fifty may not be accurate; we will
         * tune it later.)  We use the following formula to estimate
         * the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive
         * any new connection.
         */
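        /*
         * For illustration: server A with 10 active and 20 inactive
         * connections has overhead 10*50 + 20 = 520 and, at weight 2,
         * load 260.  Server B with 5 active and 100 inactive
         * connections has overhead 350 and, at weight 1, load 350.
         * A is preferred, since 520*1 < 350*2.
         */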
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);

        return least;
}


/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns) * 2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
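
/*
 * Example: a server with 12 active connections and weight 8 is a
 * candidate for replacement; if any other server has, say, 2 active
 * connections and weight 5 (2*2 < 5), this returns true.
 */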


/*
 *    Locality-Based (weighted) Least-Connection with Replication
 *    scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_dest *dest;
        struct ip_vs_lblcr_table *tbl;
        struct ip_vs_lblcr_entry *en;
        struct iphdr *iph = ip_hdr(skb);

        IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

        tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
        en = ip_vs_lblcr_get(tbl, iph->daddr);
        if (en == NULL) {
                dest = __ip_vs_wlc_schedule(svc, iph);
                if (dest == NULL) {
                        IP_VS_DBG(1, "no destination available\n");
                        return NULL;
                }
                en = ip_vs_lblcr_new(iph->daddr);
                if (en == NULL) {
                        return NULL;
                }
                ip_vs_dest_set_insert(&en->set, dest);
                ip_vs_lblcr_hash(tbl, en);
        } else {
                dest = ip_vs_dest_set_min(&en->set);
                if (!dest || is_overloaded(dest, svc)) {
                        dest = __ip_vs_wlc_schedule(svc, iph);
                        if (dest == NULL) {
                                IP_VS_DBG(1, "no destination available\n");
                                return NULL;
                        }
                        ip_vs_dest_set_insert(&en->set, dest);
                }
                if (atomic_read(&en->set.size) > 1 &&
                    jiffies - en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
                        struct ip_vs_dest *m;

                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                }
        }
        en->lastuse = jiffies;

        IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
                  "--> server %u.%u.%u.%u:%d\n",
                  NIPQUAD(en->addr),
                  NIPQUAD(dest->addr),
                  ntohs(dest->port));

        return dest;
}


/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .update_service =       ip_vs_lblcr_update_svc,
        .schedule =             ip_vs_lblcr_schedule,
};


static int __init ip_vs_lblcr_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}


static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");