/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Version:     $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight > 0)
 *                                  condition in ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
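/*
 * Illustrative walk-through of the pseudo code above (the addresses and
 * numbers are made up for the example): the first packet for dest_ip
 * 10.0.0.1 finds no server set, so the weighted least-connection server
 * is chosen and becomes the set's only member.  Later packets for
 * 10.0.0.1 stick to the least-loaded member of that set; the set grows
 * only when its least-loaded member is still overloaded (conns > weight)
 * while some server in the service runs below half its weight, and it
 * shrinks again (dropping the busiest member) once it has had more than
 * one member and been unmodified for longer than T.
 */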
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
/* for proc_net_create/proc_net_remove */
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
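/*
 * 24*60*60*HZ is one day expressed in jiffies.  The sysctl entry below
 * uses proc_dointvec_jiffies as its handler, so userspace reads and
 * writes this value in seconds while the kernel stores jiffies.
 */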
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
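/*
 * For example, with the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10 the
 * table has 1 << 10 = 1024 buckets and IP_VS_LBLCR_TAB_MASK is 0x3ff,
 * so a hash value is folded into a bucket index with a single AND
 * rather than a modulo.
 */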
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
	struct ip_vs_dest_list  *next;          /* list link */
	struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
	atomic_t                size;           /* set size */
	unsigned long           lastmod;        /* last modified time */
	struct ip_vs_dest_list  *list;          /* destination list */
	rwlock_t                lock;           /* lock for this list */
};
static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e;

	for (e=set->list; e!=NULL; e=e->next) {
		if (e->dest == dest)
			/* already existed */
			return NULL;
	}

	e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
	if (e == NULL) {
		IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
		return NULL;
	}

	atomic_inc(&dest->refcnt);
	e->dest = dest;

	/* link it to the list */
	write_lock(&set->lock);
	e->next = set->list;
	set->list = e;
	atomic_inc(&set->size);
	write_unlock(&set->lock);

	set->lastmod = jiffies;
	return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		if (e->dest == dest) {
			/* HIT */
			*ep = e->next;
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			atomic_dec(&e->dest->refcnt);
			kfree(e);
			break;
		}
		ep = &e->next;
	}
	write_unlock(&set->lock);
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		*ep = e->next;
		/*
		 * We don't kfree dest because it is referred to either
		 * by its service or by the trash dest list.
		 */
		atomic_dec(&e->dest->refcnt);
		kfree(e);
	}
	write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);
	return least;
}
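/*
 * Worked example of the integer comparison above (example values only):
 * with loh=100, least->weight=2, doh=90, dest->weight=1, the test
 * "loh*dw > doh*lw" is 100*1 > 90*2, i.e. false, so "least" (50
 * overhead units per unit of weight) is kept over "dest" (90 per unit),
 * exactly as the floating-point comparison loh/2 > doh/1 would decide.
 */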
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = atomic_read(&most->activeconns) * 50
				+ atomic_read(&most->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(most->addr), ntohs(most->port),
		  atomic_read(&most->activeconns),
		  atomic_read(&most->refcnt),
		  atomic_read(&most->weight), moh);
	return most;
}
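/*
 * The (weight > 0) condition above matters because the cross-multiplied
 * comparison moh*dw < doh*mw is only equivalent to moh/mw < doh/dw when
 * both weights are positive; the changelog at the top of this file
 * records the addition of the once-missing check.
 */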
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct list_head        list;
	__be32                  addr;           /* destination IP address */
	struct ip_vs_dest_set   set;            /* destination server set */
	unsigned long           lastuse;        /* last used time */
};


/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	rwlock_t                lock;           /* lock for this table */
	struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	struct timer_list       periodic_timer; /* collect stale entries */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
	{
		.ctl_name	= NET_IPV4_VS_LBLCR_EXPIRE,
		.procname	= "lblcr_expiration",
		.data		= &sysctl_ip_vs_lblcr_expiration,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ .ctl_name = 0 }
};

static ctl_table vs_table[] = {
	{
		.ctl_name	= NET_IPV4_VS,
		.procname	= "vs",
		.mode		= 0555,
		.child		= vs_vars_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ipvs_ipv4_table[] = {
	{
		.ctl_name	= NET_IPV4,
		.procname	= "ipv4",
		.mode		= 0555,
		.child		= vs_table
	},
	{ .ctl_name = 0 }
};

static ctl_table lblcr_root_table[] = {
	{
		.ctl_name	= CTL_NET,
		.procname	= "net",
		.mode		= 0555,
		.child		= ipvs_ipv4_table
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header * sysctl_header;
/*
 *      new/free a ip_vs_lblcr_entry, which is a mapping of a destination
 *      IP address to a server.
 */
static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
{
	struct ip_vs_lblcr_entry *en;

	en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
	if (en == NULL) {
		IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
		return NULL;
	}

	INIT_LIST_HEAD(&en->list);
	en->addr = daddr;

	/* initialize its dest set */
	atomic_set(&(en->set.size), 0);
	en->set.list = NULL;
	rwlock_init(&en->set.lock);

	return en;
}


static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	list_del(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree(en);
}
/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
{
	return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
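/*
 * 2654435761 is a multiplicative-hash constant close to 2^32 divided by
 * the golden ratio (Knuth's method): multiplying by it scrambles the
 * host-order address so that nearby addresses land in distant buckets
 * before the result is masked down to IP_VS_LBLCR_TAB_BITS bits.
 */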
/*
 *	Hash an entry in the ip_vs_lblcr_table.
 *	returns bool success.
 */
static int
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned hash;

	if (!list_empty(&en->list)) {
		IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
			  "called from %p\n", __builtin_return_address(0));
		return 0;
	}

	/*
	 *	Hash by destination IP address
	 */
	hash = ip_vs_lblcr_hashkey(en->addr);

	write_lock(&tbl->lock);
	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
	write_unlock(&tbl->lock);

	return 1;
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
{
	unsigned hash;
	struct ip_vs_lblcr_entry *en;

	hash = ip_vs_lblcr_hashkey(addr);

	read_lock(&tbl->lock);

	list_for_each_entry(en, &tbl->bucket[hash], list) {
		if (en->addr == addr) {
			/* HIT */
			read_unlock(&tbl->lock);
			return en;
		}
	}

	read_unlock(&tbl->lock);

	return NULL;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
	int i;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
{
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
				       now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
	tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_lblcr_table *tbl;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	tbl = (struct ip_vs_lblcr_table *)data;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(tbl);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&tbl->lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
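/*
 * Example of the goal computation above (example numbers): with the
 * default table, max_size is 1024*16 = 16384.  If the table has grown
 * to 20000 entries, goal = (20000 - 16384)*4/3 = 4821, which is below
 * the cap of max_size/2 = 8192, so the sweep stops after freeing about
 * 4821 timed-out entries or after visiting every bucket, whichever
 * comes first.
 */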
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
static struct ip_vs_lblcr_table *lblcr_table_list;

/*
 *	/proc/net/ip_vs_lblcr to display the mappings of
 *	destination IP address <==> its serverSet
 */
static int
ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length)
{
	off_t pos=0, begin;
	int len=0, size;
	struct ip_vs_lblcr_table *tbl;
	unsigned long now = jiffies;
	int i;
	struct ip_vs_lblcr_entry *en;

	tbl = lblcr_table_list;

	size = sprintf(buffer, "LastTime Dest IP address  Server set\n");
	pos += size;
	len += size;

	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		read_lock_bh(&tbl->lock);
		list_for_each_entry(en, &tbl->bucket[i], list) {
			char tbuf[16];
			struct ip_vs_dest_list *d;

			sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr));
			size = sprintf(buffer+len, "%8lu %-16s ",
				       now-en->lastuse, tbuf);

			read_lock(&en->set.lock);
			for (d=en->set.list; d!=NULL; d=d->next) {
				size += sprintf(buffer+len+size,
						"%u.%u.%u.%u ",
						NIPQUAD(d->dest->addr));
			}
			read_unlock(&en->set.lock);
			size += sprintf(buffer+len+size, "\n");
			len += size;
			pos += size;
			if (pos <= offset)
				len=0;
			if (pos >= offset+length) {
				read_unlock_bh(&tbl->lock);
				goto done;
			}
		}
		read_unlock_bh(&tbl->lock);
	}

  done:
	begin = len - (pos - offset);
	*start = buffer + begin;
	len -= begin;
	if (len > length)
		len = length;
	return len;
}
#endif
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/*
	 *    Allocate the ip_vs_lblcr_table for this service
	 */
	tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
	if (tbl == NULL) {
		IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
		return -ENOMEM;
	}
	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
		  "current service\n",
		  sizeof(struct ip_vs_lblcr_table));

	/*
	 *    Initialize the hash buckets
	 */
	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_LIST_HEAD(&tbl->bucket[i]);
	}
	rwlock_init(&tbl->lock);
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;

	/*
	 *    Hook periodic timer for garbage collection
	 */
	init_timer(&tbl->periodic_timer);
	tbl->periodic_timer.data = (unsigned long)tbl;
	tbl->periodic_timer.function = ip_vs_lblcr_check_expire;
	tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
	add_timer(&tbl->periodic_timer);

#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	lblcr_table_list = tbl;
#endif
	return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(tbl);

	/* release the table itself */
	kfree(svc->sched_data);
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
		  sizeof(struct ip_vs_lblcr_table));

	return 0;
}


static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
{
	return 0;
}
static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We think the overhead of processing active connections is fifty
	 * times higher than that of inactive connections in average. (This
	 * fifty times might not be accurate, we will change it later.) We
	 * use the following formula to estimate the overhead:
	 *                dest->activeconns*50 + dest->inactconns
	 * and the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
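/*
 * Example (made-up numbers): a destination with weight 4 and 5 active
 * connections is "overloaded" only if some destination in the service
 * runs at less than half its weight, e.g. weight 8 with 3 active
 * connections (3*2 < 8).  Only then is it worth replicating to another
 * server instead of riding out the burst on the current set.
 */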
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_table *tbl;
	struct ip_vs_lblcr_entry *en;
	struct iphdr *iph = ip_hdr(skb);

	IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

	tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
	en = ip_vs_lblcr_get(tbl, iph->daddr);
	if (en == NULL) {
		dest = __ip_vs_wlc_schedule(svc, iph);
		if (dest == NULL) {
			IP_VS_DBG(1, "no destination available\n");
			return NULL;
		}

		en = ip_vs_lblcr_new(iph->daddr);
		if (en == NULL) {
			return NULL;
		}
		ip_vs_dest_set_insert(&en->set, dest);
		ip_vs_lblcr_hash(tbl, en);
	} else {
		dest = ip_vs_dest_set_min(&en->set);
		if (!dest || is_overloaded(dest, svc)) {
			dest = __ip_vs_wlc_schedule(svc, iph);
			if (dest == NULL) {
				IP_VS_DBG(1, "no destination available\n");
				return NULL;
			}
			ip_vs_dest_set_insert(&en->set, dest);
		}
		if (atomic_read(&en->set.size) > 1 &&
		    jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
			struct ip_vs_dest *m;
			m = ip_vs_dest_set_max(&en->set);
			if (m)
				ip_vs_dest_set_erase(&en->set, m);
		}
	}
	en->lastuse = jiffies;

	IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
		  "--> server %u.%u.%u.%u:%d\n",
		  NIPQUAD(en->addr),
		  NIPQUAD(dest->addr),
		  ntohs(dest->port));

	return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.update_service =	ip_vs_lblcr_update_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
static int __init ip_vs_lblcr_init(void)
{
	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
	sysctl_header = register_sysctl_table(lblcr_root_table);
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
#endif
	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}


static void __exit ip_vs_lblcr_cleanup(void)
{
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	proc_net_remove(&init_net, "ip_vs_lblcr");
#endif
	unregister_sysctl_table(sysctl_header);
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");