net/netfilter/ipvs/ip_vs_core.c
1 /*
2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the Netfilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
6 * cluster of servers.
7 *
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
9 * Peter Kese <peter.kese@ijs.si>
10 * Julian Anastasov <ja@ssi.bg>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19 * and others.
20 *
21 * Changes:
22 * Paul `Rusty' Russell properly handle non-linear skbs
23 * Harald Welte don't use nfcache
24 *
25 */
26
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h> /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h> /* net_generic() */
45
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54
55 #include <net/ip_vs.h>
56
57
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph) (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
79
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 static char buf[20];
83
84 switch (proto) {
85 case IPPROTO_IP:
86 return "IP";
87 case IPPROTO_UDP:
88 return "UDP";
89 case IPPROTO_TCP:
90 return "TCP";
91 case IPPROTO_SCTP:
92 return "SCTP";
93 case IPPROTO_ICMP:
94 return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 case IPPROTO_ICMPV6:
97 return "ICMPv6";
98 #endif
99 default:
100 sprintf(buf, "IP_%u", proto);
101 return buf;
102 }
103 }
104
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 while (--rows >= 0)
108 INIT_LIST_HEAD(&table[rows]);
109 }
110
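/* Update the per-CPU input packet/byte counters at three levels --
 * real server (dest), virtual service and namespace totals -- but only
 * while the destination is still marked available.  ip_vs_out_stats()
 * below does the same for the output direction.
 */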
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 struct ip_vs_dest *dest = cp->dest;
115 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
120
121 s = this_cpu_ptr(dest->stats.cpustats);
122 u64_stats_update_begin(&s->syncp);
123 s->cnt.inpkts++;
124 s->cnt.inbytes += skb->len;
125 u64_stats_update_end(&s->syncp);
126
127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
130 u64_stats_update_begin(&s->syncp);
131 s->cnt.inpkts++;
132 s->cnt.inbytes += skb->len;
133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
135
136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 u64_stats_update_begin(&s->syncp);
138 s->cnt.inpkts++;
139 s->cnt.inbytes += skb->len;
140 u64_stats_update_end(&s->syncp);
141 }
142 }
143
144
145 static inline void
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 {
148 struct ip_vs_dest *dest = cp->dest;
149 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
150
151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
154
155 s = this_cpu_ptr(dest->stats.cpustats);
156 u64_stats_update_begin(&s->syncp);
157 s->cnt.outpkts++;
158 s->cnt.outbytes += skb->len;
159 u64_stats_update_end(&s->syncp);
160
161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
164 u64_stats_update_begin(&s->syncp);
165 s->cnt.outpkts++;
166 s->cnt.outbytes += skb->len;
167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
169
170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
171 u64_stats_update_begin(&s->syncp);
172 s->cnt.outpkts++;
173 s->cnt.outbytes += skb->len;
174 u64_stats_update_end(&s->syncp);
175 }
176 }
177
178
179 static inline void
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
181 {
182 struct netns_ipvs *ipvs = net_ipvs(svc->net);
183 struct ip_vs_cpu_stats *s;
184
185 s = this_cpu_ptr(cp->dest->stats.cpustats);
186 u64_stats_update_begin(&s->syncp);
187 s->cnt.conns++;
188 u64_stats_update_end(&s->syncp);
189
190 s = this_cpu_ptr(svc->stats.cpustats);
191 u64_stats_update_begin(&s->syncp);
192 s->cnt.conns++;
193 u64_stats_update_end(&s->syncp);
194
195 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
196 u64_stats_update_begin(&s->syncp);
197 s->cnt.conns++;
198 u64_stats_update_end(&s->syncp);
199 }
200
201
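/* Hand the packet to the protocol's state machine so it can update
 * cp->state for the given direction (input/output).
 */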
202 static inline void
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
204 const struct sk_buff *skb,
205 struct ip_vs_proto_data *pd)
206 {
207 if (likely(pd->pp->state_transition))
208 pd->pp->state_transition(cp, direction, skb, pd);
209 }
210
211 static inline int
212 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
213 struct sk_buff *skb, int protocol,
214 const union nf_inet_addr *caddr, __be16 cport,
215 const union nf_inet_addr *vaddr, __be16 vport,
216 struct ip_vs_conn_param *p)
217 {
218 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
219 vport, p);
220 p->pe = rcu_dereference(svc->pe);
221 if (p->pe && p->pe->fill_param)
222 return p->pe->fill_param(p, skb);
223
224 return 0;
225 }
226
227 /*
228 * IPVS persistent scheduling function
229  * It creates a connection entry according to its template if one exists,
230 * or selects a server and creates a connection entry plus a template.
231 * Locking: we are svc user (svc->refcnt), so we hold all dests too
232 * Protocols supported: TCP, UDP
233 */
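/* Illustrative setup (assuming the usual ipvsadm(8) userspace tool):
 *
 *	ipvsadm -A -t 192.0.2.1:80 -s rr -p 300
 *	ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.10:80 -m
 *
 * Clients from the same (netmask-adjusted) source address are then
 * pinned to one real server for 300 seconds via the templates that
 * this function creates.
 */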
234 static struct ip_vs_conn *
235 ip_vs_sched_persist(struct ip_vs_service *svc,
236 struct sk_buff *skb, __be16 src_port, __be16 dst_port,
237 int *ignored, struct ip_vs_iphdr *iph)
238 {
239 struct ip_vs_conn *cp = NULL;
240 struct ip_vs_dest *dest;
241 struct ip_vs_conn *ct;
242 __be16 dport = 0; /* destination port to forward */
243 unsigned int flags;
244 struct ip_vs_conn_param param;
245 const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
246 union nf_inet_addr snet; /* source network of the client,
247 after masking */
248
249 /* Mask saddr with the netmask to adjust template granularity */
250 #ifdef CONFIG_IP_VS_IPV6
251 if (svc->af == AF_INET6)
252 ipv6_addr_prefix(&snet.in6, &iph->saddr.in6,
253 (__force __u32) svc->netmask);
254 else
255 #endif
256 snet.ip = iph->saddr.ip & svc->netmask;
257
258 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
259 "mnet %s\n",
260 IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port),
261 IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port),
262 IP_VS_DBG_ADDR(svc->af, &snet));
263
264 /*
265  * As far as we know, FTP is a rather complicated network protocol: it
266  * uses a control connection and separate data connections. For active
267  * FTP, the FTP server initiates the data connection to the client, and
268  * its source port is often 20. For passive FTP, the FTP server tells
269  * the client the port that it passively listens on, and the client
270  * issues the data connection. In the tunneling or direct routing mode,
271  * the load balancer is on the client-to-server half of the connection,
272  * so the port number is unknown to the load balancer. Thus, a conn template like
273 * <caddr, 0, vaddr, 0, daddr, 0> is created for persistent FTP
274 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
275 * is created for other persistent services.
276 */
277 {
278 int protocol = iph->protocol;
279 const union nf_inet_addr *vaddr = &iph->daddr;
280 __be16 vport = 0;
281
282 if (dst_port == svc->port) {
283 /* non-FTP template:
284 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
285 * FTP template:
286 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
287 */
288 if (svc->port != FTPPORT)
289 vport = dst_port;
290 } else {
291 /* Note: persistent fwmark-based services and
292 * persistent port zero service are handled here.
293 * fwmark template:
294 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
295 * port zero template:
296 * <protocol,caddr,0,vaddr,0,daddr,0>
297 */
298 if (svc->fwmark) {
299 protocol = IPPROTO_IP;
300 vaddr = &fwmark;
301 }
302 }
303 /* return *ignored = -1 so NF_DROP can be used */
304 if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
305 vaddr, vport, &param) < 0) {
306 *ignored = -1;
307 return NULL;
308 }
309 }
310
311 /* Check if a template already exists */
312 ct = ip_vs_ct_in_get(&param);
313 if (!ct || !ip_vs_check_template(ct)) {
314 struct ip_vs_scheduler *sched;
315
316 /*
317 * No template found or the dest of the connection
318 * template is not available.
319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */
321 sched = rcu_dereference(svc->scheduler);
322 if (sched) {
323 /* read svc->sched_data after svc->scheduler */
324 smp_rmb();
325 dest = sched->schedule(svc, skb, iph);
326 } else {
327 dest = NULL;
328 }
329 if (!dest) {
330 IP_VS_DBG(1, "p-schedule: no dest found.\n");
331 kfree(param.pe_data);
332 *ignored = 0;
333 return NULL;
334 }
335
336 if (dst_port == svc->port && svc->port != FTPPORT)
337 dport = dest->port;
338
339 /* Create a template
340 * This adds param.pe_data to the template,
341 * and thus param.pe_data will be destroyed
342 * when the template expires */
343 ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
344 IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
345 if (ct == NULL) {
346 kfree(param.pe_data);
347 *ignored = -1;
348 return NULL;
349 }
350
351 ct->timeout = svc->timeout;
352 } else {
353 /* set destination with the found template */
354 dest = ct->dest;
355 kfree(param.pe_data);
356 }
357
358 dport = dst_port;
359 if (dport == svc->port && dest->port)
360 dport = dest->port;
361
362 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
363 && iph->protocol == IPPROTO_UDP) ?
364 IP_VS_CONN_F_ONE_PACKET : 0;
365
366 /*
367 * Create a new connection according to the template
368 */
369 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
370 src_port, &iph->daddr, dst_port, &param);
371
372 cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
373 skb->mark);
374 if (cp == NULL) {
375 ip_vs_conn_put(ct);
376 *ignored = -1;
377 return NULL;
378 }
379
380 /*
381 * Add its control
382 */
383 ip_vs_control_add(cp, ct);
384 ip_vs_conn_put(ct);
385
386 ip_vs_conn_stats(cp, svc);
387 return cp;
388 }
389
390
391 /*
392 * IPVS main scheduling function
393 * It selects a server according to the virtual service, and
394 * creates a connection entry.
395 * Protocols supported: TCP, UDP
396 *
397 * Usage of *ignored
398 *
399 * 1 : protocol tried to schedule (eg. on SYN), found svc but the
400 * svc/scheduler decides that this packet should be accepted with
401 * NF_ACCEPT because it must not be scheduled.
402 *
403 * 0 : scheduler can not find destination, so try bypass or
404 * return ICMP and then NF_DROP (ip_vs_leave).
405 *
406 * -1 : scheduler tried to schedule but fatal error occurred, eg.
407 * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
408 * failure such as missing Call-ID, ENOMEM on skb_linearize
409 * or pe_data. In this case we should return NF_DROP without
410 * any attempts to send ICMP with ip_vs_leave.
411 */
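/* Illustrative (not verbatim) sketch of how a per-protocol
 * conn_schedule handler is expected to act on *ignored, following the
 * table above:
 *
 *	cp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 *	if (!cp) {
 *		if (ignored < 0)	// fatal error: drop silently
 *			*verdict = NF_DROP;
 *		else if (!ignored)	// no dest: bypass/ICMP via ip_vs_leave
 *			*verdict = ip_vs_leave(svc, skb, pd, iph);
 *		else			// must not be scheduled
 *			*verdict = NF_ACCEPT;
 *	}
 */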
412 struct ip_vs_conn *
413 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
414 struct ip_vs_proto_data *pd, int *ignored,
415 struct ip_vs_iphdr *iph)
416 {
417 struct ip_vs_protocol *pp = pd->pp;
418 struct ip_vs_conn *cp = NULL;
419 struct ip_vs_scheduler *sched;
420 struct ip_vs_dest *dest;
421 __be16 _ports[2], *pptr;
422 unsigned int flags;
423
424 *ignored = 1;
425 /*
426  * For IPv6 fragments, only the first fragment reaches here.
427 */
428 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
429 if (pptr == NULL)
430 return NULL;
431
432 /*
433 * FTPDATA needs this check when using local real server.
434 * Never schedule Active FTPDATA connections from real server.
435 * For LVS-NAT they must be already created. For other methods
436 * with persistence the connection is created on SYN+ACK.
437 */
438 if (pptr[0] == FTPDATA) {
439 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
440 "Not scheduling FTPDATA");
441 return NULL;
442 }
443
444 /*
445 * Do not schedule replies from local real server.
446 */
447 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
448 (cp = pp->conn_in_get(svc->af, skb, iph, 1))) {
449 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
450 "Not scheduling reply for existing connection");
451 __ip_vs_conn_put(cp);
452 return NULL;
453 }
454
455 /*
456 * Persistent service
457 */
458 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
459 return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored,
460 iph);
461
462 *ignored = 0;
463
464 /*
465 * Non-persistent service
466 */
467 if (!svc->fwmark && pptr[1] != svc->port) {
468 if (!svc->port)
469 pr_err("Schedule: port zero only supported "
470 "in persistent services, "
471 "check your ipvs configuration\n");
472 return NULL;
473 }
474
475 sched = rcu_dereference(svc->scheduler);
476 if (sched) {
477 /* read svc->sched_data after svc->scheduler */
478 smp_rmb();
479 dest = sched->schedule(svc, skb, iph);
480 } else {
481 dest = NULL;
482 }
483 if (dest == NULL) {
484 IP_VS_DBG(1, "Schedule: no dest found.\n");
485 return NULL;
486 }
487
488 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
489 && iph->protocol == IPPROTO_UDP) ?
490 IP_VS_CONN_F_ONE_PACKET : 0;
491
492 /*
493 * Create a connection entry.
494 */
495 {
496 struct ip_vs_conn_param p;
497
498 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
499 &iph->saddr, pptr[0], &iph->daddr,
500 pptr[1], &p);
501 cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
502 dest->port ? dest->port : pptr[1],
503 flags, dest, skb->mark);
504 if (!cp) {
505 *ignored = -1;
506 return NULL;
507 }
508 }
509
510 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
511 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
512 ip_vs_fwd_tag(cp),
513 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
514 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
515 IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
516 cp->flags, atomic_read(&cp->refcnt));
517
518 ip_vs_conn_stats(cp, svc);
519 return cp;
520 }
521
522
523 /*
524 * Pass or drop the packet.
525 * Called by ip_vs_in, when the virtual service is available but
526 * no destination is available for a new connection.
527 */
528 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
529 struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
530 {
531 __be16 _ports[2], *pptr;
532 #ifdef CONFIG_SYSCTL
533 struct net *net;
534 struct netns_ipvs *ipvs;
535 int unicast;
536 #endif
537
538 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
539 if (pptr == NULL) {
540 return NF_DROP;
541 }
542
543 #ifdef CONFIG_SYSCTL
544 net = skb_net(skb);
545
546 #ifdef CONFIG_IP_VS_IPV6
547 if (svc->af == AF_INET6)
548 unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST;
549 else
550 #endif
551 unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST);
552
553 	/* if it is a fwmark-based service, the cache_bypass sysctl is enabled
554 	   and the destination is a non-local unicast address, then create
555 	   a cache_bypass connection entry */
556 ipvs = net_ipvs(net);
557 if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
558 int ret;
559 struct ip_vs_conn *cp;
560 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
561 iph->protocol == IPPROTO_UDP) ?
562 IP_VS_CONN_F_ONE_PACKET : 0;
563 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
564
565 /* create a new connection entry */
566 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
567 {
568 struct ip_vs_conn_param p;
569 ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
570 &iph->saddr, pptr[0],
571 &iph->daddr, pptr[1], &p);
572 cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
573 IP_VS_CONN_F_BYPASS | flags,
574 NULL, skb->mark);
575 if (!cp)
576 return NF_DROP;
577 }
578
579 /* statistics */
580 ip_vs_in_stats(cp, skb);
581
582 /* set state */
583 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
584
585 /* transmit the first SYN packet */
586 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
587 /* do not touch skb anymore */
588
589 atomic_inc(&cp->in_pkts);
590 ip_vs_conn_put(cp);
591 return ret;
592 }
593 #endif
594
595 /*
596 	 * When a virtual ftp service is present, packets destined
597 	 * for other services on the VIP may get here (except services
598 	 * listed in the ipvs table); pass those packets on, because it
599 	 * is not IPVS's job to decide to drop them.
600 */
601 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
602 return NF_ACCEPT;
603
604 /*
605 * Notify the client that the destination is unreachable, and
606 * release the socket buffer.
607 	 * Since we are at the IP layer, no TCP socket has actually been
608 	 * created, so a TCP RST packet cannot be sent; instead,
609 	 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
610 */
611 #ifdef CONFIG_IP_VS_IPV6
612 if (svc->af == AF_INET6) {
613 if (!skb->dev) {
614 struct net *net_ = dev_net(skb_dst(skb)->dev);
615
616 skb->dev = net_->loopback_dev;
617 }
618 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
619 } else
620 #endif
621 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
622
623 return NF_DROP;
624 }
625
626 #ifdef CONFIG_SYSCTL
627
628 static int sysctl_snat_reroute(struct sk_buff *skb)
629 {
630 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
631 return ipvs->sysctl_snat_reroute;
632 }
633
634 static int sysctl_nat_icmp_send(struct net *net)
635 {
636 struct netns_ipvs *ipvs = net_ipvs(net);
637 return ipvs->sysctl_nat_icmp_send;
638 }
639
640 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
641 {
642 return ipvs->sysctl_expire_nodest_conn;
643 }
644
645 #else
646
647 static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
648 static int sysctl_nat_icmp_send(struct net *net) { return 0; }
649 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
650
651 #endif
652
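/* Checksum the packet from @offset to the end and fold it to 16 bits;
 * used both to verify embedded checksums and to recompute the ICMP
 * checksum after NAT.
 */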
653 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
654 {
655 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
656 }
657
658 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
659 {
660 if (NF_INET_LOCAL_IN == hooknum)
661 return IP_DEFRAG_VS_IN;
662 if (NF_INET_FORWARD == hooknum)
663 return IP_DEFRAG_VS_FWD;
664 return IP_DEFRAG_VS_OUT;
665 }
666
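/* Reassemble IPv4 fragments (with BHs disabled) and refresh the IP
 * header checksum once a complete packet is available.  A non-zero
 * return means the skb was consumed and the caller must treat it as
 * stolen.
 */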
667 static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
668 {
669 int err;
670
671 local_bh_disable();
672 err = ip_defrag(skb, user);
673 local_bh_enable();
674 if (!err)
675 ip_send_check(ip_hdr(skb));
676
677 return err;
678 }
679
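/* Re-route an SNATed reply when the snat_reroute sysctl is enabled.
 * Replies delivered to a local client (LOCAL_IN) are left alone.
 * Returns non-zero if rerouting fails and the packet must be dropped.
 */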
680 static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
681 unsigned int hooknum)
682 {
683 if (!sysctl_snat_reroute(skb))
684 return 0;
685 /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
686 if (NF_INET_LOCAL_IN == hooknum)
687 return 0;
688 #ifdef CONFIG_IP_VS_IPV6
689 if (af == AF_INET6) {
690 struct dst_entry *dst = skb_dst(skb);
691
692 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
693 ip6_route_me_harder(skb) != 0)
694 return 1;
695 } else
696 #endif
697 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
698 ip_route_me_harder(skb, RTN_LOCAL) != 0)
699 return 1;
700
701 return 0;
702 }
703
704 /*
705 * Packet has been made sufficiently writable in caller
706 * - inout: 1=in->out, 0=out->in
707 */
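/* Rewrite the outer IP header and the IP header embedded in the ICMP
 * error so that both match the NATed connection (virtual address for
 * in->out, real server address for out->in), fix the embedded
 * TCP/UDP/SCTP port and recompute the IP and ICMP checksums.
 */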
708 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
709 struct ip_vs_conn *cp, int inout)
710 {
711 struct iphdr *iph = ip_hdr(skb);
712 unsigned int icmp_offset = iph->ihl*4;
713 struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) +
714 icmp_offset);
715 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
716
717 if (inout) {
718 iph->saddr = cp->vaddr.ip;
719 ip_send_check(iph);
720 ciph->daddr = cp->vaddr.ip;
721 ip_send_check(ciph);
722 } else {
723 iph->daddr = cp->daddr.ip;
724 ip_send_check(iph);
725 ciph->saddr = cp->daddr.ip;
726 ip_send_check(ciph);
727 }
728
729 /* the TCP/UDP/SCTP port */
730 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
731 IPPROTO_SCTP == ciph->protocol) {
732 __be16 *ports = (void *)ciph + ciph->ihl*4;
733
734 if (inout)
735 ports[1] = cp->vport;
736 else
737 ports[0] = cp->dport;
738 }
739
740 /* And finally the ICMP checksum */
741 icmph->checksum = 0;
742 icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
743 skb->ip_summed = CHECKSUM_UNNECESSARY;
744
745 if (inout)
746 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
747 "Forwarding altered outgoing ICMP");
748 else
749 IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
750 "Forwarding altered incoming ICMP");
751 }
752
753 #ifdef CONFIG_IP_VS_IPV6
754 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
755 struct ip_vs_conn *cp, int inout)
756 {
757 struct ipv6hdr *iph = ipv6_hdr(skb);
758 unsigned int icmp_offset = 0;
759 	unsigned int offs = 0; /* header offset */
760 int protocol;
761 struct icmp6hdr *icmph;
762 struct ipv6hdr *ciph;
763 unsigned short fragoffs;
764
765 ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
766 icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
767 offs = icmp_offset + sizeof(struct icmp6hdr);
768 ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
769
770 protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
771
772 if (inout) {
773 iph->saddr = cp->vaddr.in6;
774 ciph->daddr = cp->vaddr.in6;
775 } else {
776 iph->daddr = cp->daddr.in6;
777 ciph->saddr = cp->daddr.in6;
778 }
779
780 /* the TCP/UDP/SCTP port */
781 if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
782 IPPROTO_SCTP == protocol)) {
783 __be16 *ports = (void *)(skb_network_header(skb) + offs);
784
785 IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
786 ntohs(inout ? ports[1] : ports[0]),
787 ntohs(inout ? cp->vport : cp->dport));
788 if (inout)
789 ports[1] = cp->vport;
790 else
791 ports[0] = cp->dport;
792 }
793
794 /* And finally the ICMP checksum */
795 icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
796 skb->len - icmp_offset,
797 IPPROTO_ICMPV6, 0);
798 skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
799 skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
800 skb->ip_summed = CHECKSUM_PARTIAL;
801
802 if (inout)
803 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
804 (void *)ciph - (void *)iph,
805 "Forwarding altered outgoing ICMPv6");
806 else
807 IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
808 (void *)ciph - (void *)iph,
809 "Forwarding altered incoming ICMPv6");
810 }
811 #endif
812
813 /* Handle relevant response ICMP messages - forward to the right
814 * destination host.
815 */
816 static int handle_response_icmp(int af, struct sk_buff *skb,
817 union nf_inet_addr *snet,
818 __u8 protocol, struct ip_vs_conn *cp,
819 struct ip_vs_protocol *pp,
820 unsigned int offset, unsigned int ihl,
821 unsigned int hooknum)
822 {
823 unsigned int verdict = NF_DROP;
824
825 if (IP_VS_FWD_METHOD(cp) != 0) {
826 pr_err("shouldn't reach here, because the box is on the "
827 "half connection in the tun/dr module.\n");
828 }
829
830 /* Ensure the checksum is correct */
831 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
832 /* Failed checksum! */
833 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
834 IP_VS_DBG_ADDR(af, snet));
835 goto out;
836 }
837
838 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
839 IPPROTO_SCTP == protocol)
840 offset += 2 * sizeof(__u16);
841 if (!skb_make_writable(skb, offset))
842 goto out;
843
844 #ifdef CONFIG_IP_VS_IPV6
845 if (af == AF_INET6)
846 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
847 else
848 #endif
849 ip_vs_nat_icmp(skb, pp, cp, 1);
850
851 if (ip_vs_route_me_harder(af, skb, hooknum))
852 goto out;
853
854 /* do the statistics and put it back */
855 ip_vs_out_stats(cp, skb);
856
857 skb->ipvs_property = 1;
858 if (!(cp->flags & IP_VS_CONN_F_NFCT))
859 ip_vs_notrack(skb);
860 else
861 ip_vs_update_conntrack(skb, cp, 0);
862 verdict = NF_ACCEPT;
863
864 out:
865 __ip_vs_conn_put(cp);
866
867 return verdict;
868 }
869
870 /*
871 * Handle ICMP messages in the inside-to-outside direction (outgoing).
872 * Find any that might be relevant, check against existing connections.
873 * Currently handles error types - unreachable, quench, ttl exceeded.
874 */
875 static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
876 unsigned int hooknum)
877 {
878 struct iphdr *iph;
879 struct icmphdr _icmph, *ic;
880 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
881 struct ip_vs_iphdr ciph;
882 struct ip_vs_conn *cp;
883 struct ip_vs_protocol *pp;
884 unsigned int offset, ihl;
885 union nf_inet_addr snet;
886
887 *related = 1;
888
889 /* reassemble IP fragments */
890 if (ip_is_fragment(ip_hdr(skb))) {
891 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
892 return NF_STOLEN;
893 }
894
895 iph = ip_hdr(skb);
896 offset = ihl = iph->ihl * 4;
897 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
898 if (ic == NULL)
899 return NF_DROP;
900
901 IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
902 ic->type, ntohs(icmp_id(ic)),
903 &iph->saddr, &iph->daddr);
904
905 /*
906 * Work through seeing if this is for us.
907 * These checks are supposed to be in an order that means easy
908 * things are checked first to speed up processing.... however
909 * this means that some packets will manage to get a long way
910 * down this stack and then be rejected, but that's life.
911 */
912 if ((ic->type != ICMP_DEST_UNREACH) &&
913 (ic->type != ICMP_SOURCE_QUENCH) &&
914 (ic->type != ICMP_TIME_EXCEEDED)) {
915 *related = 0;
916 return NF_ACCEPT;
917 }
918
919 /* Now find the contained IP header */
920 offset += sizeof(_icmph);
921 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
922 if (cih == NULL)
923 return NF_ACCEPT; /* The packet looks wrong, ignore */
924
925 pp = ip_vs_proto_get(cih->protocol);
926 if (!pp)
927 return NF_ACCEPT;
928
929 /* Is the embedded protocol header present? */
930 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
931 pp->dont_defrag))
932 return NF_ACCEPT;
933
934 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
935 "Checking outgoing ICMP for");
936
937 ip_vs_fill_ip4hdr(cih, &ciph);
938 ciph.len += offset;
939 /* The embedded headers contain source and dest in reverse order */
940 cp = pp->conn_out_get(AF_INET, skb, &ciph, 1);
941 if (!cp)
942 return NF_ACCEPT;
943
944 snet.ip = iph->saddr;
945 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
946 pp, ciph.len, ihl, hooknum);
947 }
948
949 #ifdef CONFIG_IP_VS_IPV6
950 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
951 unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
952 {
953 struct icmp6hdr _icmph, *ic;
954 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */
955 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
956 struct ip_vs_conn *cp;
957 struct ip_vs_protocol *pp;
958 union nf_inet_addr snet;
959 unsigned int writable;
960
961 *related = 1;
962 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
963 if (ic == NULL)
964 return NF_DROP;
965
966 /*
967 * Work through seeing if this is for us.
968 * These checks are supposed to be in an order that means easy
969 * things are checked first to speed up processing.... however
970 * this means that some packets will manage to get a long way
971 * down this stack and then be rejected, but that's life.
972 */
973 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
974 *related = 0;
975 return NF_ACCEPT;
976 }
977 	/* A fragment header before the ICMP header tells us that
978 	 * this is not an error message, since those can't be fragmented.
979 */
980 if (ipvsh->flags & IP6_FH_F_FRAG)
981 return NF_DROP;
982
983 IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
984 ic->icmp6_type, ntohs(icmpv6_id(ic)),
985 &ipvsh->saddr, &ipvsh->daddr);
986
987 /* Now find the contained IP header */
988 ciph.len = ipvsh->len + sizeof(_icmph);
989 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
990 if (ip6h == NULL)
991 return NF_ACCEPT; /* The packet looks wrong, ignore */
992 ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */
993 ciph.daddr.in6 = ip6h->daddr;
994 /* skip possible IPv6 exthdrs of contained IPv6 packet */
995 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
996 if (ciph.protocol < 0)
997 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
998
999 pp = ip_vs_proto_get(ciph.protocol);
1000 if (!pp)
1001 return NF_ACCEPT;
1002
1003 /* The embedded headers contain source and dest in reverse order */
1004 cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1);
1005 if (!cp)
1006 return NF_ACCEPT;
1007
1008 snet.in6 = ciph.saddr.in6;
1009 writable = ciph.len;
1010 return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1011 pp, writable, sizeof(struct ipv6hdr),
1012 hooknum);
1013 }
1014 #endif
1015
1016 /*
1017  * Check if the SCTP chunk is an ABORT chunk
1018 */
1019 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1020 {
1021 sctp_chunkhdr_t *sch, schunk;
1022 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1023 sizeof(schunk), &schunk);
1024 if (sch == NULL)
1025 return 0;
1026 if (sch->type == SCTP_CID_ABORT)
1027 return 1;
1028 return 0;
1029 }
1030
1031 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1032 {
1033 struct tcphdr _tcph, *th;
1034
1035 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1036 if (th == NULL)
1037 return 0;
1038 return th->rst;
1039 }
1040
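/* Does this packet start a new connection (TCP SYN or SCTP INIT)? */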
1041 static inline bool is_new_conn(const struct sk_buff *skb,
1042 struct ip_vs_iphdr *iph)
1043 {
1044 switch (iph->protocol) {
1045 case IPPROTO_TCP: {
1046 struct tcphdr _tcph, *th;
1047
1048 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1049 if (th == NULL)
1050 return false;
1051 return th->syn;
1052 }
1053 case IPPROTO_SCTP: {
1054 sctp_chunkhdr_t *sch, schunk;
1055
1056 sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1057 sizeof(schunk), &schunk);
1058 if (sch == NULL)
1059 return false;
1060 return sch->type == SCTP_CID_INIT;
1061 }
1062 default:
1063 return false;
1064 }
1065 }
1066
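/* May an existing, uncontrolled connection be expired and replaced
 * when a new connection request arrives?  Decided from the connection
 * state according to the conn_reuse_mode bits.
 */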
1067 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1068 int conn_reuse_mode)
1069 {
1070 /* Controlled (FTP DATA or persistence)? */
1071 if (cp->control)
1072 return false;
1073
1074 switch (cp->protocol) {
1075 case IPPROTO_TCP:
1076 return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1077 ((conn_reuse_mode & 2) &&
1078 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1079 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1080 case IPPROTO_SCTP:
1081 return cp->state == IP_VS_SCTP_S_CLOSED;
1082 default:
1083 return false;
1084 }
1085 }
1086
1087 /* Handle response packets: rewrite addresses and send away...
1088 */
1089 static unsigned int
1090 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1091 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1092 unsigned int hooknum)
1093 {
1094 struct ip_vs_protocol *pp = pd->pp;
1095
1096 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
1097
1098 if (!skb_make_writable(skb, iph->len))
1099 goto drop;
1100
1101 /* mangle the packet */
1102 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1103 goto drop;
1104
1105 #ifdef CONFIG_IP_VS_IPV6
1106 if (af == AF_INET6)
1107 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1108 else
1109 #endif
1110 {
1111 ip_hdr(skb)->saddr = cp->vaddr.ip;
1112 ip_send_check(ip_hdr(skb));
1113 }
1114
1115 /*
1116 * nf_iterate does not expect change in the skb->dst->dev.
1117 * It looks like it is not fatal to enable this code for hooks
1118 * where our handlers are at the end of the chain list and
1119 * when all next handlers use skb->dst->dev and not outdev.
1120 * It will definitely route properly the inout NAT traffic
1121 * when multiple paths are used.
1122 */
1123
1124 /* For policy routing, packets originating from this
1125 * machine itself may be routed differently to packets
1126 * passing through. We want this packet to be routed as
1127 * if it came from this machine itself. So re-compute
1128 * the routing information.
1129 */
1130 if (ip_vs_route_me_harder(af, skb, hooknum))
1131 goto drop;
1132
1133 IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1134
1135 ip_vs_out_stats(cp, skb);
1136 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1137 skb->ipvs_property = 1;
1138 if (!(cp->flags & IP_VS_CONN_F_NFCT))
1139 ip_vs_notrack(skb);
1140 else
1141 ip_vs_update_conntrack(skb, cp, 0);
1142 ip_vs_conn_put(cp);
1143
1144 LeaveFunction(11);
1145 return NF_ACCEPT;
1146
1147 drop:
1148 ip_vs_conn_put(cp);
1149 kfree_skb(skb);
1150 LeaveFunction(11);
1151 return NF_STOLEN;
1152 }
1153
1154 /*
1155 * Check if outgoing packet belongs to the established ip_vs_conn.
1156 */
1157 static unsigned int
1158 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1159 {
1160 struct net *net = NULL;
1161 struct ip_vs_iphdr iph;
1162 struct ip_vs_protocol *pp;
1163 struct ip_vs_proto_data *pd;
1164 struct ip_vs_conn *cp;
1165
1166 EnterFunction(11);
1167
1168 /* Already marked as IPVS request or reply? */
1169 if (skb->ipvs_property)
1170 return NF_ACCEPT;
1171
1172 /* Bad... Do not break raw sockets */
1173 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1174 af == AF_INET)) {
1175 struct sock *sk = skb->sk;
1176 struct inet_sock *inet = inet_sk(skb->sk);
1177
1178 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1179 return NF_ACCEPT;
1180 }
1181
1182 if (unlikely(!skb_dst(skb)))
1183 return NF_ACCEPT;
1184
1185 net = skb_net(skb);
1186 if (!net_ipvs(net)->enable)
1187 return NF_ACCEPT;
1188
1189 ip_vs_fill_iph_skb(af, skb, &iph);
1190 #ifdef CONFIG_IP_VS_IPV6
1191 if (af == AF_INET6) {
1192 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1193 int related;
1194 int verdict = ip_vs_out_icmp_v6(skb, &related,
1195 hooknum, &iph);
1196
1197 if (related)
1198 return verdict;
1199 }
1200 } else
1201 #endif
1202 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1203 int related;
1204 int verdict = ip_vs_out_icmp(skb, &related, hooknum);
1205
1206 if (related)
1207 return verdict;
1208 }
1209
1210 pd = ip_vs_proto_data_get(net, iph.protocol);
1211 if (unlikely(!pd))
1212 return NF_ACCEPT;
1213 pp = pd->pp;
1214
1215 /* reassemble IP fragments */
1216 #ifdef CONFIG_IP_VS_IPV6
1217 if (af == AF_INET)
1218 #endif
1219 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1220 if (ip_vs_gather_frags(skb,
1221 ip_vs_defrag_user(hooknum)))
1222 return NF_STOLEN;
1223
1224 ip_vs_fill_ip4hdr(skb_network_header(skb), &iph);
1225 }
1226
1227 /*
1228 * Check if the packet belongs to an existing entry
1229 */
1230 cp = pp->conn_out_get(af, skb, &iph, 0);
1231
1232 if (likely(cp))
1233 return handle_response(af, skb, pd, cp, &iph, hooknum);
1234 if (sysctl_nat_icmp_send(net) &&
1235 (pp->protocol == IPPROTO_TCP ||
1236 pp->protocol == IPPROTO_UDP ||
1237 pp->protocol == IPPROTO_SCTP)) {
1238 __be16 _ports[2], *pptr;
1239
1240 pptr = frag_safe_skb_hp(skb, iph.len,
1241 sizeof(_ports), _ports, &iph);
1242 if (pptr == NULL)
1243 return NF_ACCEPT; /* Not for me */
1244 if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
1245 pptr[0])) {
1246 /*
1247 				 * Notify the real server that there is no
1248 				 * existing entry, unless the packet is a
1249 				 * TCP RST or an SCTP ABORT.
1250 */
1251 if ((iph.protocol != IPPROTO_TCP &&
1252 iph.protocol != IPPROTO_SCTP)
1253 || ((iph.protocol == IPPROTO_TCP
1254 && !is_tcp_reset(skb, iph.len))
1255 || (iph.protocol == IPPROTO_SCTP
1256 && !is_sctp_abort(skb,
1257 iph.len)))) {
1258 #ifdef CONFIG_IP_VS_IPV6
1259 if (af == AF_INET6) {
1260 if (!skb->dev)
1261 skb->dev = net->loopback_dev;
1262 icmpv6_send(skb,
1263 ICMPV6_DEST_UNREACH,
1264 ICMPV6_PORT_UNREACH,
1265 0);
1266 } else
1267 #endif
1268 icmp_send(skb,
1269 ICMP_DEST_UNREACH,
1270 ICMP_PORT_UNREACH, 0);
1271 return NF_DROP;
1272 }
1273 }
1274 }
1275 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1276 "ip_vs_out: packet continues traversal as normal");
1277 return NF_ACCEPT;
1278 }
1279
1280 /*
1281 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1282 * used only for VS/NAT.
1283 * Check if packet is reply for established ip_vs_conn.
1284 */
1285 static unsigned int
1286 ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1287 const struct nf_hook_state *state)
1288 {
1289 return ip_vs_out(ops->hooknum, skb, AF_INET);
1290 }
1291
1292 /*
1293 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1294 * Check if packet is reply for established ip_vs_conn.
1295 */
1296 static unsigned int
1297 ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1298 const struct nf_hook_state *state)
1299 {
1300 return ip_vs_out(ops->hooknum, skb, AF_INET);
1301 }
1302
1303 #ifdef CONFIG_IP_VS_IPV6
1304
1305 /*
1306 * It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1307 * used only for VS/NAT.
1308 * Check if packet is reply for established ip_vs_conn.
1309 */
1310 static unsigned int
1311 ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1312 const struct nf_hook_state *state)
1313 {
1314 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1315 }
1316
1317 /*
1318 * It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1319 * Check if packet is reply for established ip_vs_conn.
1320 */
1321 static unsigned int
1322 ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1323 const struct nf_hook_state *state)
1324 {
1325 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1326 }
1327
1328 #endif
1329
1330 /*
1331 * Handle ICMP messages in the outside-to-inside direction (incoming).
1332 * Find any that might be relevant, check against existing connections,
1333 * forward to the right destination host if relevant.
1334 * Currently handles error types - unreachable, quench, ttl exceeded.
1335 */
1336 static int
1337 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1338 {
1339 struct net *net = NULL;
1340 struct iphdr *iph;
1341 struct icmphdr _icmph, *ic;
1342 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1343 struct ip_vs_iphdr ciph;
1344 struct ip_vs_conn *cp;
1345 struct ip_vs_protocol *pp;
1346 struct ip_vs_proto_data *pd;
1347 unsigned int offset, offset2, ihl, verdict;
1348 bool ipip;
1349
1350 *related = 1;
1351
1352 /* reassemble IP fragments */
1353 if (ip_is_fragment(ip_hdr(skb))) {
1354 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1355 return NF_STOLEN;
1356 }
1357
1358 iph = ip_hdr(skb);
1359 offset = ihl = iph->ihl * 4;
1360 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1361 if (ic == NULL)
1362 return NF_DROP;
1363
1364 IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1365 ic->type, ntohs(icmp_id(ic)),
1366 &iph->saddr, &iph->daddr);
1367
1368 /*
1369 * Work through seeing if this is for us.
1370 * These checks are supposed to be in an order that means easy
1371 * things are checked first to speed up processing.... however
1372 * this means that some packets will manage to get a long way
1373 * down this stack and then be rejected, but that's life.
1374 */
1375 if ((ic->type != ICMP_DEST_UNREACH) &&
1376 (ic->type != ICMP_SOURCE_QUENCH) &&
1377 (ic->type != ICMP_TIME_EXCEEDED)) {
1378 *related = 0;
1379 return NF_ACCEPT;
1380 }
1381
1382 /* Now find the contained IP header */
1383 offset += sizeof(_icmph);
1384 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1385 if (cih == NULL)
1386 return NF_ACCEPT; /* The packet looks wrong, ignore */
1387
1388 net = skb_net(skb);
1389
1390 /* Special case for errors for IPIP packets */
1391 ipip = false;
1392 if (cih->protocol == IPPROTO_IPIP) {
1393 if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1394 return NF_ACCEPT;
1395 /* Error for our IPIP must arrive at LOCAL_IN */
1396 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1397 return NF_ACCEPT;
1398 offset += cih->ihl * 4;
1399 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1400 if (cih == NULL)
1401 return NF_ACCEPT; /* The packet looks wrong, ignore */
1402 ipip = true;
1403 }
1404
1405 pd = ip_vs_proto_data_get(net, cih->protocol);
1406 if (!pd)
1407 return NF_ACCEPT;
1408 pp = pd->pp;
1409
1410 /* Is the embedded protocol header present? */
1411 if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1412 pp->dont_defrag))
1413 return NF_ACCEPT;
1414
1415 IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1416 "Checking incoming ICMP for");
1417
1418 offset2 = offset;
1419 ip_vs_fill_ip4hdr(cih, &ciph);
1420 ciph.len += offset;
1421 offset = ciph.len;
1422 /* The embedded headers contain source and dest in reverse order.
1423 * For IPIP this is error for request, not for reply.
1424 */
1425 cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1);
1426 if (!cp)
1427 return NF_ACCEPT;
1428
1429 verdict = NF_DROP;
1430
1431 /* Ensure the checksum is correct */
1432 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1433 /* Failed checksum! */
1434 IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1435 &iph->saddr);
1436 goto out;
1437 }
1438
1439 if (ipip) {
1440 __be32 info = ic->un.gateway;
1441 __u8 type = ic->type;
1442 __u8 code = ic->code;
1443
1444 /* Update the MTU */
1445 if (ic->type == ICMP_DEST_UNREACH &&
1446 ic->code == ICMP_FRAG_NEEDED) {
1447 struct ip_vs_dest *dest = cp->dest;
1448 u32 mtu = ntohs(ic->un.frag.mtu);
1449 __be16 frag_off = cih->frag_off;
1450
1451 /* Strip outer IP and ICMP, go to IPIP header */
1452 if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1453 goto ignore_ipip;
1454 offset2 -= ihl + sizeof(_icmph);
1455 skb_reset_network_header(skb);
1456 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1457 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1458 ipv4_update_pmtu(skb, dev_net(skb->dev),
1459 mtu, 0, 0, 0, 0);
1460 /* Client uses PMTUD? */
1461 if (!(frag_off & htons(IP_DF)))
1462 goto ignore_ipip;
1463 /* Prefer the resulting PMTU */
1464 if (dest) {
1465 struct ip_vs_dest_dst *dest_dst;
1466
1467 rcu_read_lock();
1468 dest_dst = rcu_dereference(dest->dest_dst);
1469 if (dest_dst)
1470 mtu = dst_mtu(dest_dst->dst_cache);
1471 rcu_read_unlock();
1472 }
1473 if (mtu > 68 + sizeof(struct iphdr))
1474 mtu -= sizeof(struct iphdr);
1475 info = htonl(mtu);
1476 }
1477 /* Strip outer IP, ICMP and IPIP, go to IP header of
1478 * original request.
1479 */
1480 if (pskb_pull(skb, offset2) == NULL)
1481 goto ignore_ipip;
1482 skb_reset_network_header(skb);
1483 IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1484 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1485 type, code, ntohl(info));
1486 icmp_send(skb, type, code, info);
1487 		/* ICMP can be shorter, but account it anyway */
1488 ip_vs_out_stats(cp, skb);
1489
1490 ignore_ipip:
1491 consume_skb(skb);
1492 verdict = NF_STOLEN;
1493 goto out;
1494 }
1495
1496 /* do the statistics and put it back */
1497 ip_vs_in_stats(cp, skb);
1498 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1499 IPPROTO_SCTP == cih->protocol)
1500 offset += 2 * sizeof(__u16);
1501 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1502
1503 out:
1504 __ip_vs_conn_put(cp);
1505
1506 return verdict;
1507 }
1508
1509 #ifdef CONFIG_IP_VS_IPV6
1510 static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
1511 unsigned int hooknum, struct ip_vs_iphdr *iph)
1512 {
1513 struct net *net = NULL;
1514 struct ipv6hdr _ip6h, *ip6h;
1515 struct icmp6hdr _icmph, *ic;
1516 struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1517 struct ip_vs_conn *cp;
1518 struct ip_vs_protocol *pp;
1519 struct ip_vs_proto_data *pd;
1520 unsigned int offs_ciph, writable, verdict;
1521
1522 *related = 1;
1523
1524 ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1525 if (ic == NULL)
1526 return NF_DROP;
1527
1528 /*
1529 * Work through seeing if this is for us.
1530 * These checks are supposed to be in an order that means easy
1531 * things are checked first to speed up processing.... however
1532 * this means that some packets will manage to get a long way
1533 * down this stack and then be rejected, but that's life.
1534 */
1535 if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1536 *related = 0;
1537 return NF_ACCEPT;
1538 }
1539 	/* A fragment header before the ICMP header tells us that
1540 	 * this is not an error message, since those can't be fragmented.
1541 */
1542 if (iph->flags & IP6_FH_F_FRAG)
1543 return NF_DROP;
1544
1545 IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1546 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1547 &iph->saddr, &iph->daddr);
1548
1549 /* Now find the contained IP header */
1550 ciph.len = iph->len + sizeof(_icmph);
1551 offs_ciph = ciph.len; /* Save ip header offset */
1552 ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h);
1553 if (ip6h == NULL)
1554 return NF_ACCEPT; /* The packet looks wrong, ignore */
1555 ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */
1556 ciph.daddr.in6 = ip6h->daddr;
1557 /* skip possible IPv6 exthdrs of contained IPv6 packet */
1558 ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL);
1559 if (ciph.protocol < 0)
1560 return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */
1561
1562 net = skb_net(skb);
1563 pd = ip_vs_proto_data_get(net, ciph.protocol);
1564 if (!pd)
1565 return NF_ACCEPT;
1566 pp = pd->pp;
1567
1568 /* Cannot handle fragmented embedded protocol */
1569 if (ciph.fragoffs)
1570 return NF_ACCEPT;
1571
1572 IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph,
1573 "Checking incoming ICMPv6 for");
1574
1575 /* The embedded headers contain source and dest in reverse order
1576 * if not from localhost
1577 */
1578 cp = pp->conn_in_get(AF_INET6, skb, &ciph,
1579 (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1);
1580
1581 if (!cp)
1582 return NF_ACCEPT;
1583 /* VS/TUN, VS/DR and LOCALNODE just let it go */
1584 if ((hooknum == NF_INET_LOCAL_OUT) &&
1585 (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1586 __ip_vs_conn_put(cp);
1587 return NF_ACCEPT;
1588 }
1589
1590 /* do the statistics and put it back */
1591 ip_vs_in_stats(cp, skb);
1592
1593 /* Need to mangle contained IPv6 header in ICMPv6 packet */
1594 writable = ciph.len;
1595 if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1596 IPPROTO_SCTP == ciph.protocol)
1597 writable += 2 * sizeof(__u16); /* Also mangle ports */
1598
1599 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph);
1600
1601 __ip_vs_conn_put(cp);
1602
1603 return verdict;
1604 }
1605 #endif
1606
1607
1608 /*
1609 * Check if it's for virtual services, look it up,
1610 * and send it on its way...
1611 */
1612 static unsigned int
1613 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1614 {
1615 struct net *net;
1616 struct ip_vs_iphdr iph;
1617 struct ip_vs_protocol *pp;
1618 struct ip_vs_proto_data *pd;
1619 struct ip_vs_conn *cp;
1620 int ret, pkts;
1621 struct netns_ipvs *ipvs;
1622 int conn_reuse_mode;
1623
1624 /* Already marked as IPVS request or reply? */
1625 if (skb->ipvs_property)
1626 return NF_ACCEPT;
1627
1628 /*
1629 * Big tappo:
1630 * - remote client: only PACKET_HOST
1631 * - route: used for struct net when skb->dev is unset
1632 */
1633 if (unlikely((skb->pkt_type != PACKET_HOST &&
1634 hooknum != NF_INET_LOCAL_OUT) ||
1635 !skb_dst(skb))) {
1636 ip_vs_fill_iph_skb(af, skb, &iph);
1637 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1638 " ignored in hook %u\n",
1639 skb->pkt_type, iph.protocol,
1640 IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1641 return NF_ACCEPT;
1642 }
1643 /* ipvs enabled in this netns ? */
1644 net = skb_net(skb);
1645 ipvs = net_ipvs(net);
1646 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1647 return NF_ACCEPT;
1648
1649 ip_vs_fill_iph_skb(af, skb, &iph);
1650
1651 /* Bad... Do not break raw sockets */
1652 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
1653 af == AF_INET)) {
1654 struct sock *sk = skb->sk;
1655 struct inet_sock *inet = inet_sk(skb->sk);
1656
1657 if (inet && sk->sk_family == PF_INET && inet->nodefrag)
1658 return NF_ACCEPT;
1659 }
1660
1661 #ifdef CONFIG_IP_VS_IPV6
1662 if (af == AF_INET6) {
1663 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1664 int related;
1665 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
1666 &iph);
1667
1668 if (related)
1669 return verdict;
1670 }
1671 } else
1672 #endif
1673 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1674 int related;
1675 int verdict = ip_vs_in_icmp(skb, &related, hooknum);
1676
1677 if (related)
1678 return verdict;
1679 }
1680
1681 /* Protocol supported? */
1682 pd = ip_vs_proto_data_get(net, iph.protocol);
1683 if (unlikely(!pd))
1684 return NF_ACCEPT;
1685 pp = pd->pp;
1686 /*
1687 * Check if the packet belongs to an existing connection entry
1688 */
1689 cp = pp->conn_in_get(af, skb, &iph, 0);
1690
1691 conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
1692 if (conn_reuse_mode && !iph.fragoffs &&
1693 is_new_conn(skb, &iph) && cp &&
1694 ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1695 unlikely(!atomic_read(&cp->dest->weight))) ||
1696 unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
1697 if (!atomic_read(&cp->n_control))
1698 ip_vs_conn_expire_now(cp);
1699 __ip_vs_conn_put(cp);
1700 cp = NULL;
1701 }
1702
1703 if (unlikely(!cp) && !iph.fragoffs) {
1704 		/* No non-first fragments need to enter here: fragment zero,
1705 		 * replayed by nf_defrag_ipv6, will already have created the cp
1706 */
1707 int v;
1708
1709 /* Schedule and create new connection entry into &cp */
1710 if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph))
1711 return v;
1712 }
1713
1714 if (unlikely(!cp)) {
1715 /* sorry, all this trouble for a no-hit :) */
1716 IP_VS_DBG_PKT(12, af, pp, skb, 0,
1717 "ip_vs_in: packet continues traversal as normal");
1718 if (iph.fragoffs) {
1719 		/* A fragment that couldn't be mapped to a conn entry
1720 		 * means the nf_defrag_ipv6 module is missing
1721 */
1722 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1723 IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment");
1724 }
1725 return NF_ACCEPT;
1726 }
1727
1728 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
1729 /* Check the server status */
1730 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1731 /* the destination server is not available */
1732
1733 if (sysctl_expire_nodest_conn(ipvs)) {
1734 /* try to expire the connection immediately */
1735 ip_vs_conn_expire_now(cp);
1736 }
1737 /* don't restart its timer, and silently
1738 drop the packet. */
1739 __ip_vs_conn_put(cp);
1740 return NF_DROP;
1741 }
1742
1743 ip_vs_in_stats(cp, skb);
1744 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1745 if (cp->packet_xmit)
1746 ret = cp->packet_xmit(skb, cp, pp, &iph);
1747 /* do not touch skb anymore */
1748 else {
1749 IP_VS_DBG_RL("warning: packet_xmit is null");
1750 ret = NF_ACCEPT;
1751 }
1752
1753 	/* Increase its packet counter and check if it needs
1754 	 * to be synchronized
1755 	 *
1756 	 * Sync the connection if it is about to close, to
1757 	 * encourage the standby servers to update the connection timeout
1758 *
1759 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1760 */
1761
1762 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1763 pkts = sysctl_sync_threshold(ipvs);
1764 else
1765 pkts = atomic_add_return(1, &cp->in_pkts);
1766
1767 if (ipvs->sync_state & IP_VS_STATE_MASTER)
1768 ip_vs_sync_conn(net, cp, pkts);
1769
1770 ip_vs_conn_put(cp);
1771 return ret;
1772 }
1773
1774 /*
1775 * AF_INET handler in NF_INET_LOCAL_IN chain
1776 * Schedule and forward packets from remote clients
1777 */
1778 static unsigned int
1779 ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1780 const struct nf_hook_state *state)
1781 {
1782 return ip_vs_in(ops->hooknum, skb, AF_INET);
1783 }
1784
1785 /*
1786 * AF_INET handler in NF_INET_LOCAL_OUT chain
1787 * Schedule and forward packets from local clients
1788 */
1789 static unsigned int
1790 ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1791 const struct nf_hook_state *state)
1792 {
1793 return ip_vs_in(ops->hooknum, skb, AF_INET);
1794 }
1795
1796 #ifdef CONFIG_IP_VS_IPV6
1797
1798 /*
1799 * AF_INET6 handler in NF_INET_LOCAL_IN chain
1800 * Schedule and forward packets from remote clients
1801 */
1802 static unsigned int
1803 ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1804 const struct nf_hook_state *state)
1805 {
1806 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1807 }
1808
1809 /*
1810 * AF_INET6 handler in NF_INET_LOCAL_OUT chain
1811 * Schedule and forward packets from local clients
1812 */
1813 static unsigned int
1814 ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1815 const struct nf_hook_state *state)
1816 {
1817 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1818 }
1819
1820 #endif
1821
1822
1823 /*
1824 * It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1825 * related packets destined for 0.0.0.0/0.
1826  * When a fwmark-based virtual service is used, such as a transparent
1827  * cache cluster, TCP packets can be marked and routed to ip_vs_in,
1828  * but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1829  * sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1830 * and send them to ip_vs_in_icmp.
1831 */
1832 static unsigned int
1833 ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
1834 const struct nf_hook_state *state)
1835 {
1836 int r;
1837 struct net *net;
1838 struct netns_ipvs *ipvs;
1839
1840 if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1841 return NF_ACCEPT;
1842
1843 /* ipvs enabled in this netns ? */
1844 net = skb_net(skb);
1845 ipvs = net_ipvs(net);
1846 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1847 return NF_ACCEPT;
1848
1849 return ip_vs_in_icmp(skb, &r, ops->hooknum);
1850 }
1851
1852 #ifdef CONFIG_IP_VS_IPV6
1853 static unsigned int
1854 ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1855 const struct nf_hook_state *state)
1856 {
1857 int r;
1858 struct net *net;
1859 struct netns_ipvs *ipvs;
1860 struct ip_vs_iphdr iphdr;
1861
1862 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
1863 if (iphdr.protocol != IPPROTO_ICMPV6)
1864 return NF_ACCEPT;
1865
1866 /* ipvs enabled in this netns ? */
1867 net = skb_net(skb);
1868 ipvs = net_ipvs(net);
1869 if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1870 return NF_ACCEPT;
1871
1872 return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
1873 }
1874 #endif
1875
1876
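/* Netfilter hook registration.  At LOCAL_IN and LOCAL_OUT the reply
 * (SNAT) handler is registered just before the request (scheduling)
 * handler, so replies of existing NAT connections are handled before
 * new scheduling decisions; at FORWARD the ICMP catcher runs just
 * before the reply handler.
 */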
1877 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1878 /* After packet filtering, change source only for VS/NAT */
1879 {
1880 .hook = ip_vs_reply4,
1881 .owner = THIS_MODULE,
1882 .pf = NFPROTO_IPV4,
1883 .hooknum = NF_INET_LOCAL_IN,
1884 .priority = NF_IP_PRI_NAT_SRC - 2,
1885 },
1886 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1887 * or VS/NAT(change destination), so that filtering rules can be
1888 * applied to IPVS. */
1889 {
1890 .hook = ip_vs_remote_request4,
1891 .owner = THIS_MODULE,
1892 .pf = NFPROTO_IPV4,
1893 .hooknum = NF_INET_LOCAL_IN,
1894 .priority = NF_IP_PRI_NAT_SRC - 1,
1895 },
1896 /* Before ip_vs_in, change source only for VS/NAT */
1897 {
1898 .hook = ip_vs_local_reply4,
1899 .owner = THIS_MODULE,
1900 .pf = NFPROTO_IPV4,
1901 .hooknum = NF_INET_LOCAL_OUT,
1902 .priority = NF_IP_PRI_NAT_DST + 1,
1903 },
1904 /* After mangle, schedule and forward local requests */
1905 {
1906 .hook = ip_vs_local_request4,
1907 .owner = THIS_MODULE,
1908 .pf = NFPROTO_IPV4,
1909 .hooknum = NF_INET_LOCAL_OUT,
1910 .priority = NF_IP_PRI_NAT_DST + 2,
1911 },
1912 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1913 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1914 {
1915 .hook = ip_vs_forward_icmp,
1916 .owner = THIS_MODULE,
1917 .pf = NFPROTO_IPV4,
1918 .hooknum = NF_INET_FORWARD,
1919 .priority = 99,
1920 },
1921 /* After packet filtering, change source only for VS/NAT */
1922 {
1923 .hook = ip_vs_reply4,
1924 .owner = THIS_MODULE,
1925 .pf = NFPROTO_IPV4,
1926 .hooknum = NF_INET_FORWARD,
1927 .priority = 100,
1928 },
1929 #ifdef CONFIG_IP_VS_IPV6
1930 /* After packet filtering, change source only for VS/NAT */
1931 {
1932 .hook = ip_vs_reply6,
1933 .owner = THIS_MODULE,
1934 .pf = NFPROTO_IPV6,
1935 .hooknum = NF_INET_LOCAL_IN,
1936 .priority = NF_IP6_PRI_NAT_SRC - 2,
1937 },
1938 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1939 * or VS/NAT(change destination), so that filtering rules can be
1940 * applied to IPVS. */
1941 {
1942 .hook = ip_vs_remote_request6,
1943 .owner = THIS_MODULE,
1944 .pf = NFPROTO_IPV6,
1945 .hooknum = NF_INET_LOCAL_IN,
1946 .priority = NF_IP6_PRI_NAT_SRC - 1,
1947 },
1948 /* Before ip_vs_in, change source only for VS/NAT */
1949 {
1950 .hook = ip_vs_local_reply6,
1951 .owner = THIS_MODULE,
1952 .pf = NFPROTO_IPV6,
1953 .hooknum = NF_INET_LOCAL_OUT,
1954 .priority = NF_IP6_PRI_NAT_DST + 1,
1955 },
1956 /* After mangle, schedule and forward local requests */
1957 {
1958 .hook = ip_vs_local_request6,
1959 .owner = THIS_MODULE,
1960 .pf = NFPROTO_IPV6,
1961 .hooknum = NF_INET_LOCAL_OUT,
1962 .priority = NF_IP6_PRI_NAT_DST + 2,
1963 },
1964 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1965 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1966 {
1967 .hook = ip_vs_forward_icmp_v6,
1968 .owner = THIS_MODULE,
1969 .pf = NFPROTO_IPV6,
1970 .hooknum = NF_INET_FORWARD,
1971 .priority = 99,
1972 },
1973 /* After packet filtering, change source only for VS/NAT */
1974 {
1975 .hook = ip_vs_reply6,
1976 .owner = THIS_MODULE,
1977 .pf = NFPROTO_IPV6,
1978 .hooknum = NF_INET_FORWARD,
1979 .priority = 100,
1980 },
1981 #endif
1982 };
1983 /*
1984 * Initialize IP Virtual Server netns mem.
1985 */
1986 static int __net_init __ip_vs_init(struct net *net)
1987 {
1988 struct netns_ipvs *ipvs;
1989
1990 ipvs = net_generic(net, ip_vs_net_id);
1991 if (ipvs == NULL)
1992 return -ENOMEM;
1993
1994 	/* Hold the beast until a service is registered */
1995 ipvs->enable = 0;
1996 ipvs->net = net;
1997 /* Counters used for creating unique names */
1998 ipvs->gen = atomic_read(&ipvs_netns_cnt);
1999 atomic_inc(&ipvs_netns_cnt);
2000 net->ipvs = ipvs;
2001
2002 if (ip_vs_estimator_net_init(net) < 0)
2003 goto estimator_fail;
2004
2005 if (ip_vs_control_net_init(net) < 0)
2006 goto control_fail;
2007
2008 if (ip_vs_protocol_net_init(net) < 0)
2009 goto protocol_fail;
2010
2011 if (ip_vs_app_net_init(net) < 0)
2012 goto app_fail;
2013
2014 if (ip_vs_conn_net_init(net) < 0)
2015 goto conn_fail;
2016
2017 if (ip_vs_sync_net_init(net) < 0)
2018 goto sync_fail;
2019
2020 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2021 sizeof(struct netns_ipvs), ipvs->gen);
2022 return 0;
2023 /*
2024 * Error handling
2025 */
2026
2027 sync_fail:
2028 ip_vs_conn_net_cleanup(net);
2029 conn_fail:
2030 ip_vs_app_net_cleanup(net);
2031 app_fail:
2032 ip_vs_protocol_net_cleanup(net);
2033 protocol_fail:
2034 ip_vs_control_net_cleanup(net);
2035 control_fail:
2036 ip_vs_estimator_net_cleanup(net);
2037 estimator_fail:
2038 net->ipvs = NULL;
2039 return -ENOMEM;
2040 }
2041
2042 static void __net_exit __ip_vs_cleanup(struct net *net)
2043 {
2044 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
2045 ip_vs_conn_net_cleanup(net);
2046 ip_vs_app_net_cleanup(net);
2047 ip_vs_protocol_net_cleanup(net);
2048 ip_vs_control_net_cleanup(net);
2049 ip_vs_estimator_net_cleanup(net);
2050 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
2051 net->ipvs = NULL;
2052 }
2053
2054 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2055 {
2056 EnterFunction(2);
2057 net_ipvs(net)->enable = 0; /* Disable packet reception */
2058 smp_wmb();
2059 ip_vs_sync_net_cleanup(net);
2060 LeaveFunction(2);
2061 }
2062
2063 static struct pernet_operations ipvs_core_ops = {
2064 .init = __ip_vs_init,
2065 .exit = __ip_vs_cleanup,
2066 .id = &ip_vs_net_id,
2067 .size = sizeof(struct netns_ipvs),
2068 };
2069
2070 static struct pernet_operations ipvs_core_dev_ops = {
2071 .exit = __ip_vs_dev_cleanup,
2072 };
2073
2074 /*
2075 * Initialize IP Virtual Server
2076 */
2077 static int __init ip_vs_init(void)
2078 {
2079 int ret;
2080
2081 ret = ip_vs_control_init();
2082 if (ret < 0) {
2083 pr_err("can't setup control.\n");
2084 goto exit;
2085 }
2086
2087 ip_vs_protocol_init();
2088
2089 ret = ip_vs_conn_init();
2090 if (ret < 0) {
2091 pr_err("can't setup connection table.\n");
2092 goto cleanup_protocol;
2093 }
2094
2095 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
2096 if (ret < 0)
2097 goto cleanup_conn;
2098
2099 ret = register_pernet_device(&ipvs_core_dev_ops);
2100 if (ret < 0)
2101 goto cleanup_sub;
2102
2103 ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2104 if (ret < 0) {
2105 pr_err("can't register hooks.\n");
2106 goto cleanup_dev;
2107 }
2108
2109 ret = ip_vs_register_nl_ioctl();
2110 if (ret < 0) {
2111 pr_err("can't register netlink/ioctl.\n");
2112 goto cleanup_hooks;
2113 }
2114
2115 pr_info("ipvs loaded.\n");
2116
2117 return ret;
2118
2119 cleanup_hooks:
2120 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2121 cleanup_dev:
2122 unregister_pernet_device(&ipvs_core_dev_ops);
2123 cleanup_sub:
2124 unregister_pernet_subsys(&ipvs_core_ops);
2125 cleanup_conn:
2126 ip_vs_conn_cleanup();
2127 cleanup_protocol:
2128 ip_vs_protocol_cleanup();
2129 ip_vs_control_cleanup();
2130 exit:
2131 return ret;
2132 }
2133
2134 static void __exit ip_vs_cleanup(void)
2135 {
2136 ip_vs_unregister_nl_ioctl();
2137 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2138 unregister_pernet_device(&ipvs_core_dev_ops);
2139 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2140 ip_vs_conn_cleanup();
2141 ip_vs_protocol_cleanup();
2142 ip_vs_control_cleanup();
2143 pr_info("ipvs unloaded.\n");
2144 }
2145
2146 module_init(ip_vs_init);
2147 module_exit(ip_vs_cleanup);
2148 MODULE_LICENSE("GPL");