net/ipv4/netfilter/ipt_CLUSTERIP.c
/* Cluster IP hashmark target
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 * based on ideas of Fabio Olive Leite <olive@unixforge.org>
 *
 * Development of this code funded by SuSE Linux AG, http://www.suse.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/jhash.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/if_arp.h>
#include <linux/seq_file.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ip.h>

#define CLUSTERIP_VERSION "0.8"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: CLUSTERIP target");

struct clusterip_config {
        struct list_head list;                  /* list of all configs */
        atomic_t refcount;                      /* reference count */
        atomic_t entries;                       /* number of entries/rules
                                                 * referencing us */

        __be32 clusterip;                       /* the IP address */
        u_int8_t clustermac[ETH_ALEN];          /* the MAC address */
        struct net_device *dev;                 /* device */
        u_int16_t num_total_nodes;              /* total number of nodes */
        unsigned long local_nodes;              /* bitmap of local node numbers */

#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *pde;             /* proc dir entry */
#endif
        enum clusterip_hashmode hash_mode;      /* which hashing mode */
        u_int32_t hash_initval;                 /* hash initialization */
        struct rcu_head rcu;
};

static LIST_HEAD(clusterip_configs);

/* clusterip_lock protects the clusterip_configs list */
static DEFINE_SPINLOCK(clusterip_lock);

#ifdef CONFIG_PROC_FS
static const struct file_operations clusterip_proc_fops;
static struct proc_dir_entry *clusterip_procdir;
#endif

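/* take another reference on a config; the caller must already hold one */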
static inline void
clusterip_config_get(struct clusterip_config *c)
{
        atomic_inc(&c->refcount);
}

static void clusterip_config_rcu_free(struct rcu_head *head)
{
        kfree(container_of(head, struct clusterip_config, rcu));
}

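/* drop a reference; the config is freed via RCU once the last user is gone */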
static inline void
clusterip_config_put(struct clusterip_config *c)
{
        if (atomic_dec_and_test(&c->refcount))
                call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
}

/* decrease the count of entries using/referencing this config. If last
 * entry (rule) is removed, remove the config from lists, but don't free it
 * yet, since proc-files could still be holding references */
static inline void
clusterip_config_entry_put(struct clusterip_config *c)
{
        local_bh_disable();
        if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
                list_del_rcu(&c->list);
                spin_unlock(&clusterip_lock);
                local_bh_enable();

                dev_mc_del(c->dev, c->clustermac);
                dev_put(c->dev);

                /* In case anyone still accesses the file, the open/close
                 * functions are also incrementing the refcount on their own,
                 * so it's safe to remove the entry even if it's in use. */
#ifdef CONFIG_PROC_FS
                remove_proc_entry(c->pde->name, c->pde->parent);
#endif
                return;
        }
        local_bh_enable();
}

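/* find a config by cluster IP; the caller must hold rcu_read_lock_bh() */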
static struct clusterip_config *
__clusterip_config_find(__be32 clusterip)
{
        struct clusterip_config *c;

        list_for_each_entry_rcu(c, &clusterip_configs, list) {
                if (c->clusterip == clusterip)
                        return c;
        }

        return NULL;
}

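/* find a config and grab a reference on it; if @entry is set, also account
 * one more rule referencing the config */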
static inline struct clusterip_config *
clusterip_config_find_get(__be32 clusterip, int entry)
{
        struct clusterip_config *c;

        rcu_read_lock_bh();
        c = __clusterip_config_find(clusterip);
        if (c) {
                if (unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
        }
        rcu_read_unlock_bh();

        return c;
}

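/* mark every node number listed in the rule as handled by this machine */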
static void
clusterip_config_init_nodelist(struct clusterip_config *c,
                               const struct ipt_clusterip_tgt_info *i)
{
        int n;

        for (n = 0; n < i->num_local_nodes; n++)
                set_bit(i->local_nodes[n] - 1, &c->local_nodes);
}

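/* allocate a new config for @ip, create its proc entry and put it on the
 * global list */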
static struct clusterip_config *
clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
                      struct net_device *dev)
{
        struct clusterip_config *c;

        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
                return NULL;

        c->dev = dev;
        c->clusterip = ip;
        memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
        c->num_total_nodes = i->num_total_nodes;
        clusterip_config_init_nodelist(c, i);
        c->hash_mode = i->hash_mode;
        c->hash_initval = i->hash_initval;
        atomic_set(&c->refcount, 1);
        atomic_set(&c->entries, 1);

#ifdef CONFIG_PROC_FS
        {
                char buffer[16];

                /* create proc dir entry */
                sprintf(buffer, "%pI4", &ip);
                c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
                                          clusterip_procdir,
                                          &clusterip_proc_fops, c);
                if (!c->pde) {
                        kfree(c);
                        return NULL;
                }
        }
#endif

        spin_lock_bh(&clusterip_lock);
        list_add_rcu(&c->list, &clusterip_configs);
        spin_unlock_bh(&clusterip_lock);

        return c;
}

#ifdef CONFIG_PROC_FS
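/* add node number @nodenum to the local bitmap; returns non-zero if the
 * number is out of range or already present */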
static int
clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum)
{
        if (nodenum == 0 ||
            nodenum > c->num_total_nodes)
                return 1;

        /* check if we already have this number in our bitfield */
        if (test_and_set_bit(nodenum - 1, &c->local_nodes))
                return 1;

        return 0;
}

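/* remove node number @nodenum; returns true if it was out of range or not set */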
static bool
clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
{
        if (nodenum == 0 ||
            nodenum > c->num_total_nodes)
                return true;

        if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
                return false;

        return true;
}
#endif

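/* map a packet to a node number in 1..num_total_nodes, using the configured
 * hash mode over the source IP and (optionally) the ports */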
static inline u_int32_t
clusterip_hashfn(const struct sk_buff *skb,
                 const struct clusterip_config *config)
{
        const struct iphdr *iph = ip_hdr(skb);
        unsigned long hashval;
        u_int16_t sport = 0, dport = 0;
        int poff;

        poff = proto_ports_offset(iph->protocol);
        if (poff >= 0) {
                const u_int16_t *ports;
                u16 _ports[2];

                ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
                if (ports) {
                        sport = ports[0];
                        dport = ports[1];
                }
        } else {
                if (net_ratelimit())
                        pr_info("unknown protocol %u\n", iph->protocol);
        }

        switch (config->hash_mode) {
        case CLUSTERIP_HASHMODE_SIP:
                hashval = jhash_1word(ntohl(iph->saddr),
                                      config->hash_initval);
                break;
        case CLUSTERIP_HASHMODE_SIP_SPT:
                hashval = jhash_2words(ntohl(iph->saddr), sport,
                                       config->hash_initval);
                break;
        case CLUSTERIP_HASHMODE_SIP_SPT_DPT:
                hashval = jhash_3words(ntohl(iph->saddr), sport, dport,
                                       config->hash_initval);
                break;
        default:
                /* to make gcc happy */
                hashval = 0;
                /* This cannot happen, unless the check function wasn't called
                 * at rule load time */
                pr_info("unknown mode %u\n", config->hash_mode);
                BUG();
                break;
        }

        /* node numbers are 1..n, not 0..n */
        return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
}

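/* true if this node is responsible for the given hash value */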
static inline int
clusterip_responsible(const struct clusterip_config *config, u_int32_t hash)
{
        return test_bit(hash - 1, &config->local_nodes);
}

/***********************************************************************
 * IPTABLES TARGET
 ***********************************************************************/

static unsigned int
clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
        const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        u_int32_t hash;

        /* don't need to clusterip_config_get() here, since refcount
         * is only decremented by destroy() - and ip_tables guarantees
         * that the ->target() function isn't called after ->destroy() */

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL) {
                pr_info("no conntrack!\n");
                /* FIXME: need to drop invalid ones, since replies
                 * to outgoing connections of other nodes will be
                 * marked as INVALID */
                return NF_DROP;
        }

        /* special case: ICMP error handling. conntrack distinguishes between
         * error messages (RELATED) and information requests (see below) */
        if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
            (ctinfo == IP_CT_RELATED ||
             ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY))
                return XT_CONTINUE;

        /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
         * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
         * on, which all have an ID field [relevant for hashing]. */

        hash = clusterip_hashfn(skb, cipinfo->config);

        switch (ctinfo) {
        case IP_CT_NEW:
                ct->mark = hash;
                break;
        case IP_CT_RELATED:
        case IP_CT_RELATED + IP_CT_IS_REPLY:
                /* FIXME: we don't handle expectations at the
                 * moment. they can arrive on a different node than
                 * the master connection (e.g. FTP passive mode) */
        case IP_CT_ESTABLISHED:
        case IP_CT_ESTABLISHED + IP_CT_IS_REPLY:
                break;
        default:
                break;
        }

#ifdef DEBUG
        nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
        pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
        if (!clusterip_responsible(cipinfo->config, hash)) {
                pr_debug("not responsible\n");
                return NF_DROP;
        }
        pr_debug("responsible\n");

        /* despite being received via linklayer multicast, this is
         * actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
        skb->pkt_type = PACKET_HOST;

        return XT_CONTINUE;
}

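/* checkentry: validate the rule parameters and look up or create the
 * cluster config for the destination IP */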
static int clusterip_tg_check(const struct xt_tgchk_param *par)
{
        struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
        struct clusterip_config *config;
        int ret;

        if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
                pr_info("unknown mode %u\n", cipinfo->hash_mode);
                return -EINVAL;
        }
        if (e->ip.dmsk.s_addr != htonl(0xffffffff) ||
            e->ip.dst.s_addr == 0) {
                pr_info("Please specify destination IP\n");
                return -EINVAL;
        }

        /* FIXME: further sanity checks */

        config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
        if (!config) {
                if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
                        pr_info("no config found for %pI4, need 'new'\n",
                                &e->ip.dst.s_addr);
                        return -EINVAL;
                } else {
                        struct net_device *dev;

                        if (e->ip.iniface[0] == '\0') {
                                pr_info("Please specify an interface name\n");
                                return -EINVAL;
                        }

                        dev = dev_get_by_name(&init_net, e->ip.iniface);
                        if (!dev) {
                                pr_info("no such interface %s\n",
                                        e->ip.iniface);
                                return -ENOENT;
                        }

                        config = clusterip_config_init(cipinfo,
                                                       e->ip.dst.s_addr, dev);
                        if (!config) {
                                pr_info("cannot allocate config\n");
                                dev_put(dev);
                                return -ENOMEM;
                        }
                        dev_mc_add(config->dev, config->clustermac);
                }
        }
        cipinfo->config = config;

        ret = nf_ct_l3proto_try_module_get(par->family);
        if (ret < 0)
                pr_info("cannot load conntrack support for proto=%u\n",
                        par->family);
        return ret;
}

/* drop reference count of cluster config when rule is deleted */
static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
{
        const struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;

        /* if no more entries are referencing the config, remove it
         * from the list and destroy the proc entry */
        clusterip_config_entry_put(cipinfo->config);

        clusterip_config_put(cipinfo->config);

        nf_ct_l3proto_module_put(par->family);
}

#ifdef CONFIG_COMPAT
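/* layout of the target info as seen by 32-bit userspace; only the size of
 * the trailing config pointer differs from the native structure */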
struct compat_ipt_clusterip_tgt_info {
        u_int32_t flags;
        u_int8_t clustermac[6];
        u_int16_t num_total_nodes;
        u_int16_t num_local_nodes;
        u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
        u_int32_t hash_mode;
        u_int32_t hash_initval;
        compat_uptr_t config;
};
#endif /* CONFIG_COMPAT */

static struct xt_target clusterip_tg_reg __read_mostly = {
        .name           = "CLUSTERIP",
        .family         = NFPROTO_IPV4,
        .target         = clusterip_tg,
        .checkentry     = clusterip_tg_check,
        .destroy        = clusterip_tg_destroy,
        .targetsize     = sizeof(struct ipt_clusterip_tgt_info),
#ifdef CONFIG_COMPAT
        .compatsize     = sizeof(struct compat_ipt_clusterip_tgt_info),
#endif /* CONFIG_COMPAT */
        .me             = THIS_MODULE
};

/***********************************************************************
 * ARP MANGLING CODE
 ***********************************************************************/

/* hardcoded for 48bit ethernet and 32bit ipv4 addresses */
struct arp_payload {
        u_int8_t src_hw[ETH_ALEN];
        __be32 src_ip;
        u_int8_t dst_hw[ETH_ALEN];
        __be32 dst_ip;
} __packed;

#ifdef DEBUG
static void arp_print(struct arp_payload *payload)
{
#define HBUFFERLEN 30
        char hbuffer[HBUFFERLEN];
        int j, k;

        for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < ETH_ALEN; j++) {
                hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
                hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
                hbuffer[k++] = ':';
        }
        hbuffer[--k] = '\0';

        pr_debug("src %pI4@%s, dst %pI4\n",
                 &payload->src_ip, hbuffer, &payload->dst_ip);
}
#endif

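/* rewrite the source hardware address in outgoing ARP requests and replies
 * for a cluster IP to the shared cluster MAC, so peers learn that address */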
static unsigned int
arp_mangle(unsigned int hook,
           struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
           int (*okfn)(struct sk_buff *))
{
        struct arphdr *arp = arp_hdr(skb);
        struct arp_payload *payload;
        struct clusterip_config *c;

        /* we don't care about non-ethernet and non-ipv4 ARP */
        if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_pln != 4 || arp->ar_hln != ETH_ALEN)
                return NF_ACCEPT;

        /* we only want to mangle arp requests and replies */
        if (arp->ar_op != htons(ARPOP_REPLY) &&
            arp->ar_op != htons(ARPOP_REQUEST))
                return NF_ACCEPT;

        payload = (void *)(arp + 1);

        /* if there is no clusterip configuration for the arp reply's
         * source ip, we don't want to mangle it */
        c = clusterip_config_find_get(payload->src_ip, 0);
        if (!c)
                return NF_ACCEPT;

        /* normally the Linux kernel always replies to ARP queries for
         * addresses on different interfaces.  However, in the CLUSTERIP case
         * this wouldn't work, since we didn't subscribe to the multicast
         * group on other interfaces */
        if (c->dev != out) {
                pr_debug("not mangling arp reply on different "
                         "interface: cip'%s'-skb'%s'\n",
                         c->dev->name, out->name);
                clusterip_config_put(c);
                return NF_ACCEPT;
        }

        /* mangle reply hardware address */
        memcpy(payload->src_hw, c->clustermac, arp->ar_hln);

#ifdef DEBUG
        pr_debug("mangled arp reply: ");
        arp_print(payload);
#endif

        clusterip_config_put(c);

        return NF_ACCEPT;
}

static struct nf_hook_ops cip_arp_ops __read_mostly = {
        .hook = arp_mangle,
        .pf = NFPROTO_ARP,
        .hooknum = NF_ARP_OUT,
        .priority = -1
};

/***********************************************************************
 * PROC DIR HANDLING
 ***********************************************************************/

#ifdef CONFIG_PROC_FS

struct clusterip_seq_position {
        unsigned int pos;       /* position */
        unsigned int weight;    /* number of bits set == size */
        unsigned int bit;       /* current bit */
        unsigned long val;      /* current value */
};

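/* seq_file iterator over the bits set in local_nodes; each position yields
 * one locally handled node number */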
static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
        struct clusterip_config *c = s->private;
        unsigned int weight;
        u_int32_t local_nodes;
        struct clusterip_seq_position *idx;

        /* FIXME: possible race */
        local_nodes = c->local_nodes;
        weight = hweight32(local_nodes);
        if (*pos >= weight)
                return NULL;

        idx = kmalloc(sizeof(struct clusterip_seq_position), GFP_KERNEL);
        if (!idx)
                return ERR_PTR(-ENOMEM);

        idx->pos = *pos;
        idx->weight = weight;
        idx->bit = ffs(local_nodes);
        idx->val = local_nodes;
        clear_bit(idx->bit - 1, &idx->val);

        return idx;
}

static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct clusterip_seq_position *idx = v;

        *pos = ++idx->pos;
        if (*pos >= idx->weight) {
                kfree(v);
                return NULL;
        }
        idx->bit = ffs(idx->val);
        clear_bit(idx->bit - 1, &idx->val);
        return idx;
}

static void clusterip_seq_stop(struct seq_file *s, void *v)
{
        if (!IS_ERR(v))
                kfree(v);
}

static int clusterip_seq_show(struct seq_file *s, void *v)
{
        struct clusterip_seq_position *idx = v;

        if (idx->pos != 0)
                seq_putc(s, ',');

        seq_printf(s, "%u", idx->bit);

        if (idx->pos == idx->weight - 1)
                seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations clusterip_seq_ops = {
        .start  = clusterip_seq_start,
        .next   = clusterip_seq_next,
        .stop   = clusterip_seq_stop,
        .show   = clusterip_seq_show,
};

static int clusterip_proc_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &clusterip_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                struct clusterip_config *c = PDE(inode)->data;

                sf->private = c;

                clusterip_config_get(c);
        }

        return ret;
}

static int clusterip_proc_release(struct inode *inode, struct file *file)
{
        struct clusterip_config *c = PDE(inode)->data;
        int ret;

        ret = seq_release(inode, file);

        if (!ret)
                clusterip_config_put(c);

        return ret;
}

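/* write handler for the per-config proc file: "+<n>" adds node <n> to the
 * local set, "-<n>" removes it */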
static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
                                    size_t size, loff_t *ofs)
{
        struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
#define PROC_WRITELEN 10
        char buffer[PROC_WRITELEN+1];
        unsigned long nodenum;

        /* only accept writes that fit into the buffer, and NUL-terminate
         * the copied data before parsing it */
        if (size > PROC_WRITELEN)
                return -EIO;
        if (copy_from_user(buffer, input, size))
                return -EFAULT;
        buffer[size] = 0;

        if (*buffer == '+') {
                nodenum = simple_strtoul(buffer + 1, NULL, 10);
                if (clusterip_add_node(c, nodenum))
                        return -ENOMEM;
        } else if (*buffer == '-') {
                nodenum = simple_strtoul(buffer + 1, NULL, 10);
                if (clusterip_del_node(c, nodenum))
                        return -ENOENT;
        } else
                return -EIO;

        return size;
}

static const struct file_operations clusterip_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = clusterip_proc_open,
        .read    = seq_read,
        .write   = clusterip_proc_write,
        .llseek  = seq_lseek,
        .release = clusterip_proc_release,
};

#endif /* CONFIG_PROC_FS */

static int __init clusterip_tg_init(void)
{
        int ret;

        ret = xt_register_target(&clusterip_tg_reg);
        if (ret < 0)
                return ret;

        ret = nf_register_hook(&cip_arp_ops);
        if (ret < 0)
                goto cleanup_target;

#ifdef CONFIG_PROC_FS
        clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net);
        if (!clusterip_procdir) {
                pr_err("Unable to create proc dir entry\n");
                ret = -ENOMEM;
                goto cleanup_hook;
        }
#endif /* CONFIG_PROC_FS */

        pr_info("ClusterIP Version %s loaded successfully\n",
                CLUSTERIP_VERSION);
        return 0;

#ifdef CONFIG_PROC_FS
cleanup_hook:
        nf_unregister_hook(&cip_arp_ops);
#endif /* CONFIG_PROC_FS */
cleanup_target:
        xt_unregister_target(&clusterip_tg_reg);
        return ret;
}

static void __exit clusterip_tg_exit(void)
{
        pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
#ifdef CONFIG_PROC_FS
        remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
#endif
        nf_unregister_hook(&cip_arp_ops);
        xt_unregister_target(&clusterip_tg_reg);

        /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
        rcu_barrier_bh();
}

module_init(clusterip_tg_init);
module_exit(clusterip_tg_exit);