/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system.  IPVS is now implemented as a module
 *              over the Netfilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
 * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
 * and others. Much of the code here is taken from the IP MASQ code of
 * kernel 2.2.
 *
 * Changes:
 *
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>		/* for proc_net_* */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include <net/net_namespace.h>
#include <net/ip_vs.h>


#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS	12
#endif

/*
 * Connection hash size. Default is what was selected at compile time.
 */
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");

/* size and mask values */
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;

/*
 *  Connection hash table: for input and output packet lookups of IPVS
 */
static struct hlist_head *ip_vs_conn_tab __read_mostly;

/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;

/* counter for no-client-port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);

/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;

/*
 *  Fine locking granularity for big connection hash table
 */
#define CT_LOCKARRAY_BITS	5
#define CT_LOCKARRAY_SIZE	(1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK	(CT_LOCKARRAY_SIZE-1)

/* We need an addrstrlen that works with or without v6 */
#ifdef CONFIG_IP_VS_IPV6
#define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
#else
#define IP_VS_ADDRSTRLEN (8+1)
#endif

struct ip_vs_aligned_lock
{
	spinlock_t	l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;

static inline void ct_write_lock_bh(unsigned int key)
{
	spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}

static inline void ct_write_unlock_bh(unsigned int key)
{
	spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
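
/*
 * Example: with CT_LOCKARRAY_BITS == 5 there are 32 striped locks, and
 * hash bucket N is protected by lock N & CT_LOCKARRAY_MASK, so buckets
 * 7, 39, 71, ... all share lock 7.  Aligning each lock to SMP_CACHE_BYTES
 * keeps the locks on separate cache lines and avoids false sharing.
 */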


/*
 *	Returns hash value for IPVS connection entry
 */
static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
				       const union nf_inet_addr *addr,
				       __be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
				    (__force u32)port, proto, ip_vs_conn_rnd) ^
			((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
	return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
			    ip_vs_conn_rnd) ^
		((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
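
/*
 * Note: the jhash result is xor'ed with the netns pointer (shifted right
 * by 8 to drop the always-zero low bits), so identical tuples in different
 * network namespaces tend to hash to different buckets; the final mask
 * keeps the key within ip_vs_conn_tab.
 */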

static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
					     bool inverse)
{
	const union nf_inet_addr *addr;
	__be16 port;

	if (p->pe_data && p->pe->hashkey_raw)
		return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
			ip_vs_conn_tab_mask;

	if (likely(!inverse)) {
		addr = p->caddr;
		port = p->cport;
	} else {
		addr = p->vaddr;
		port = p->vport;
	}

	return ip_vs_conn_hashkey(p->ipvs->net, p->af, p->protocol, addr, port);
}

static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
	struct ip_vs_conn_param p;

	ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
			      &cp->caddr, cp->cport, NULL, 0, &p);

	if (cp->pe) {
		p.pe = cp->pe;
		p.pe_data = cp->pe_data;
		p.pe_data_len = cp->pe_data_len;
	}

	return ip_vs_conn_hashkey_param(&p, false);
}

/*
 *	Hashes ip_vs_conn in ip_vs_conn_tab by netns, proto, addr, port.
 *	returns bool success.
 */
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
	unsigned int hash;
	int ret;

	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
		return 0;

	/* Hash by protocol, client address and port */
	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
		cp->flags |= IP_VS_CONN_F_HASHED;
		atomic_inc(&cp->refcnt);
		hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
		ret = 1;
	} else {
		pr_err("%s(): request for already hashed, called from %pF\n",
		       __func__, __builtin_return_address(0));
		ret = 0;
	}

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}
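
/*
 * Note: hashing takes its own reference on the connection (the table entry
 * counts as a user); ip_vs_conn_unhash() and ip_vs_conn_unlink() drop that
 * reference again when the entry is removed from the table.
 */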


/*
 *	UNhashes ip_vs_conn from ip_vs_conn_tab.
 *	returns bool success. Caller should hold conn reference.
 */
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
	unsigned int hash;
	int ret;

	/* unhash it and decrease its reference counter */
	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (cp->flags & IP_VS_CONN_F_HASHED) {
		hlist_del_rcu(&cp->c_list);
		cp->flags &= ~IP_VS_CONN_F_HASHED;
		atomic_dec(&cp->refcnt);
		ret = 1;
	} else
		ret = 0;

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}

/* Try to unlink ip_vs_conn from ip_vs_conn_tab.
 * returns bool success.
 */
static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
{
	unsigned int hash;
	bool ret;

	hash = ip_vs_conn_hashkey_conn(cp);

	ct_write_lock_bh(hash);
	spin_lock(&cp->lock);

	if (cp->flags & IP_VS_CONN_F_HASHED) {
		ret = false;
		/* Decrease refcnt and unlink conn only if we are last user */
		if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
			hlist_del_rcu(&cp->c_list);
			cp->flags &= ~IP_VS_CONN_F_HASHED;
			ret = true;
		}
	} else
		ret = atomic_read(&cp->refcnt) ? false : true;

	spin_unlock(&cp->lock);
	ct_write_unlock_bh(hash);

	return ret;
}
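
/*
 * Note: the atomic_cmpxchg() above succeeds only when the table holds the
 * sole remaining reference (refcnt == 1), so a connection still in use by
 * another CPU stays linked and its expiry is simply retried later.
 */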


/*
 *  Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 *  Called for pkts coming from OUTside-to-INside.
 *	p->caddr, p->cport: pkt source address (foreign host)
 *	p->vaddr, p->vport: pkt dest address (load balancer)
 */
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp;

	hash = ip_vs_conn_hashkey_param(p, false);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (p->cport == cp->cport && p->vport == cp->vport &&
		    cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (!__ip_vs_conn_get(cp))
				continue;
			/* HIT */
			rcu_read_unlock();
			return cp;
		}
	}

	rcu_read_unlock();

	return NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
	struct ip_vs_conn *cp;

	cp = __ip_vs_conn_in_get(p);
	if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
		struct ip_vs_conn_param cport_zero_p = *p;
		cport_zero_p.cport = 0;
		cp = __ip_vs_conn_in_get(&cport_zero_p);
	}

	IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      cp ? "hit" : "not hit");

	return cp;
}
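
/*
 * Note: the second lookup with cport == 0 covers connections created before
 * the client port was known (e.g. for FTP), which are hashed with
 * IP_VS_CONN_F_NO_CPORT; checking ip_vs_conn_no_cport_cnt first makes this
 * extra pass free when no such connection exists.
 */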

static int
ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
			    int af, const struct sk_buff *skb,
			    const struct ip_vs_iphdr *iph,
			    struct ip_vs_conn_param *p)
{
	__be16 _ports[2], *pptr;

	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
	if (pptr == NULL)
		return 1;

	if (likely(!ip_vs_iph_inverse(iph)))
		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
				      pptr[0], &iph->daddr, pptr[1], p);
	else
		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
				      pptr[1], &iph->saddr, pptr[0], p);
	return 0;
}

struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
	struct ip_vs_conn_param p;

	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
		return NULL;

	return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);

/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp;

	hash = ip_vs_conn_hashkey_param(p, false);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (unlikely(p->pe_data && p->pe->ct_match)) {
			if (cp->ipvs != p->ipvs)
				continue;
			if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
				if (__ip_vs_conn_get(cp))
					goto out;
			}
			continue;
		}

		if (cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
		    /* protocol should only be IPPROTO_IP if
		     * p->vaddr is a fwmark */
		    ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
				     p->af, p->vaddr, &cp->vaddr) &&
		    p->vport == cp->vport && p->cport == cp->cport &&
		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (__ip_vs_conn_get(cp))
				goto out;
		}
	}
	cp = NULL;

out:
	rcu_read_unlock();

	IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      cp ? "hit" : "not hit");

	return cp;
}

/* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
 * Called for pkts coming from inside-to-OUTside.
 *	p->caddr, p->cport: pkt source address (inside host)
 *	p->vaddr, p->vport: pkt dest address (foreign host) */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
	unsigned int hash;
	struct ip_vs_conn *cp, *ret = NULL;

	/*
	 *	Check for "full" addressed entries
	 */
	hash = ip_vs_conn_hashkey_param(p, true);

	rcu_read_lock();

	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
		if (p->vport == cp->cport && p->cport == cp->dport &&
		    cp->af == p->af &&
		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
		    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
		    p->protocol == cp->protocol &&
		    cp->ipvs == p->ipvs) {
			if (!__ip_vs_conn_get(cp))
				continue;
			/* HIT */
			ret = cp;
			break;
		}
	}

	rcu_read_unlock();

	IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
		      ip_vs_proto_name(p->protocol),
		      IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
		      IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
		      ret ? "hit" : "not hit");

	return ret;
}

struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
			 const struct ip_vs_iphdr *iph)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
	struct ip_vs_conn_param p;

	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
		return NULL;

	return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);

/*
 *      Put back the conn and restart its timer with its timeout
 */
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
	unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
		0 : cp->timeout;
	mod_timer(&cp->timer, jiffies+t);

	__ip_vs_conn_put(cp);
}


/*
 *	Fill a no_client_port connection with a client port number
 */
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
	if (ip_vs_conn_unhash(cp)) {
		spin_lock_bh(&cp->lock);
		if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
			atomic_dec(&ip_vs_conn_no_cport_cnt);
			cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
			cp->cport = cport;
		}
		spin_unlock_bh(&cp->lock);

		/* hash on the new cport */
		ip_vs_conn_hash(cp);
	}
}


/*
 *	Bind a connection entry with the corresponding packet_xmit.
 *	Called by ip_vs_conn_new.
 */
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		cp->packet_xmit = ip_vs_nat_xmit;
		break;

	case IP_VS_CONN_F_TUNNEL:
#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			cp->packet_xmit = ip_vs_tunnel_xmit_v6;
		else
#endif
			cp->packet_xmit = ip_vs_tunnel_xmit;
		break;

	case IP_VS_CONN_F_DROUTE:
		cp->packet_xmit = ip_vs_dr_xmit;
		break;

	case IP_VS_CONN_F_LOCALNODE:
		cp->packet_xmit = ip_vs_null_xmit;
		break;

	case IP_VS_CONN_F_BYPASS:
		cp->packet_xmit = ip_vs_bypass_xmit;
		break;
	}
}

#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		cp->packet_xmit = ip_vs_nat_xmit_v6;
		break;

	case IP_VS_CONN_F_TUNNEL:
		if (cp->daf == AF_INET6)
			cp->packet_xmit = ip_vs_tunnel_xmit_v6;
		else
			cp->packet_xmit = ip_vs_tunnel_xmit;
		break;

	case IP_VS_CONN_F_DROUTE:
		cp->packet_xmit = ip_vs_dr_xmit_v6;
		break;

	case IP_VS_CONN_F_LOCALNODE:
		cp->packet_xmit = ip_vs_null_xmit;
		break;

	case IP_VS_CONN_F_BYPASS:
		cp->packet_xmit = ip_vs_bypass_xmit_v6;
		break;
	}
}
#endif


static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
	return atomic_read(&dest->activeconns)
		+ atomic_read(&dest->inactconns);
}

/*
 *	Bind a connection entry with a virtual service destination
 *	Called just after a new connection entry is created.
 */
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
	unsigned int conn_flags;
	__u32 flags;

	/* if dest is NULL, then return directly */
	if (!dest)
		return;

	/* Increase the refcnt counter of the dest */
	ip_vs_dest_hold(dest);

	conn_flags = atomic_read(&dest->conn_flags);
	if (cp->protocol != IPPROTO_UDP)
		conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
	flags = cp->flags;
	/* Bind with the destination and its corresponding transmitter */
	if (flags & IP_VS_CONN_F_SYNC) {
		/* if the connection is not a template and is created
		 * by sync, preserve the activity flag.
		 */
		if (!(flags & IP_VS_CONN_F_TEMPLATE))
			conn_flags &= ~IP_VS_CONN_F_INACTIVE;
		/* connections inherit forwarding method from dest */
		flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
	}
	flags |= conn_flags;
	cp->flags = flags;
	cp->dest = dest;

	IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
		      "dest->refcnt:%d\n",
		      ip_vs_proto_name(cp->protocol),
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
		      ip_vs_fwd_tag(cp), cp->state,
		      cp->flags, atomic_read(&cp->refcnt),
		      atomic_read(&dest->refcnt));

	/* Update the connection counters */
	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
		/* It is a normal connection, so modify the counters
		 * according to the flags, later the protocol can
		 * update them on state change
		 */
		if (!(flags & IP_VS_CONN_F_INACTIVE))
			atomic_inc(&dest->activeconns);
		else
			atomic_inc(&dest->inactconns);
	} else {
		/* It is a persistent connection/template, so increase
		   the persistent connection counter */
		atomic_inc(&dest->persistconns);
	}

	if (dest->u_threshold != 0 &&
	    ip_vs_dest_totalconns(dest) >= dest->u_threshold)
		dest->flags |= IP_VS_DEST_F_OVERLOAD;
}
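
/*
 * Example: with u_threshold == 1000, a destination is marked
 * IP_VS_DEST_F_OVERLOAD as soon as activeconns + inactconns reaches 1000;
 * ip_vs_unbind_dest() below clears the flag again with some hysteresis.
 */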


/*
 * Check if there is a destination for the connection, if so
 * bind the connection to the destination.
 */
void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
	struct ip_vs_dest *dest;

	rcu_read_lock();

	/* This function is only invoked by the synchronization code. We do
	 * not currently support heterogeneous pools with synchronization,
	 * so we can make the assumption that the svc_af is the same as the
	 * dest_af
	 */
	dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
			       cp->dport, &cp->vaddr, cp->vport,
			       cp->protocol, cp->fwmark, cp->flags);
	if (dest) {
		struct ip_vs_proto_data *pd;

		spin_lock_bh(&cp->lock);
		if (cp->dest) {
			spin_unlock_bh(&cp->lock);
			rcu_read_unlock();
			return;
		}

		/* Applications may depend on the forwarding method, so it is
		 * better to always reassign them when binding the dest */
		if (cp->app)
			ip_vs_unbind_app(cp);

		ip_vs_bind_dest(cp, dest);
		spin_unlock_bh(&cp->lock);

		/* Update its packet transmitter */
		cp->packet_xmit = NULL;
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			ip_vs_bind_xmit_v6(cp);
		else
#endif
			ip_vs_bind_xmit(cp);

		pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
		if (pd && atomic_read(&pd->appcnt))
			ip_vs_bind_app(cp, pd->pp);
	}
	rcu_read_unlock();
}


/*
 *	Unbind a connection entry with its VS destination
 *	Called by the ip_vs_conn_expire function.
 */
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
	struct ip_vs_dest *dest = cp->dest;

	if (!dest)
		return;

	IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
		      "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
		      "dest->refcnt:%d\n",
		      ip_vs_proto_name(cp->protocol),
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
		      ip_vs_fwd_tag(cp), cp->state,
		      cp->flags, atomic_read(&cp->refcnt),
		      atomic_read(&dest->refcnt));

	/* Update the connection counters */
	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
		/* It is a normal connection, so decrease the inactconns
		   or activeconns counter */
		if (cp->flags & IP_VS_CONN_F_INACTIVE) {
			atomic_dec(&dest->inactconns);
		} else {
			atomic_dec(&dest->activeconns);
		}
	} else {
		/* It is a persistent connection/template, so decrease
		   the persistent connection counter */
		atomic_dec(&dest->persistconns);
	}

	if (dest->l_threshold != 0) {
		if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	} else if (dest->u_threshold != 0) {
		if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	} else {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
	}

	ip_vs_dest_put(dest);
}
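
/*
 * Note: the 3/4 check above implements hysteresis: with u_threshold == 1000
 * and no l_threshold set, the IP_VS_DEST_F_OVERLOAD flag raised at 1000
 * connections is only cleared once totalconns * 4 < u_threshold * 3, i.e.
 * below 750 connections, which avoids flapping around the threshold.
 */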

static int expire_quiescent_template(struct netns_ipvs *ipvs,
				     struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_expire_quiescent_template &&
		(atomic_read(&dest->weight) == 0);
#else
	return 0;
#endif
}

/*
 * Checking if the destination of a connection template is available.
 * If available, return 1, otherwise invalidate this connection
 * template and return 0.
 */
int ip_vs_check_template(struct ip_vs_conn *ct)
{
	struct ip_vs_dest *dest = ct->dest;
	struct netns_ipvs *ipvs = ct->ipvs;

	/*
	 * Checking the dest server status.
	 */
	if ((dest == NULL) ||
	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
	    expire_quiescent_template(ipvs, dest)) {
		IP_VS_DBG_BUF(9, "check_template: dest not available for "
			      "protocol %s s:%s:%d v:%s:%d "
			      "-> d:%s:%d\n",
			      ip_vs_proto_name(ct->protocol),
			      IP_VS_DBG_ADDR(ct->af, &ct->caddr),
			      ntohs(ct->cport),
			      IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
			      ntohs(ct->vport),
			      IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
			      ntohs(ct->dport));

		/*
		 * Invalidate the connection template
		 */
		if (ct->vport != htons(0xffff)) {
			if (ip_vs_conn_unhash(ct)) {
				ct->dport = htons(0xffff);
				ct->vport = htons(0xffff);
				ct->cport = 0;
				ip_vs_conn_hash(ct);
			}
		}

		/*
		 * Simply decrease the refcnt of the template,
		 * don't restart its timer.
		 */
		__ip_vs_conn_put(ct);
		return 0;
	}
	return 1;
}
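
/*
 * Note: rehashing the invalidated template with dport/vport == 0xffff and
 * cport == 0 moves it under a key that no regular lookup will match, so
 * existing users can still drop their references while new sessions stop
 * reusing the template.
 */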

static void ip_vs_conn_rcu_free(struct rcu_head *head)
{
	struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
					     rcu_head);

	ip_vs_pe_put(cp->pe);
	kfree(cp->pe_data);
	kmem_cache_free(ip_vs_conn_cachep, cp);
}

static void ip_vs_conn_expire(unsigned long data)
{
	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
	struct netns_ipvs *ipvs = cp->ipvs;

	/*
	 *	do I control anybody?
	 */
	if (atomic_read(&cp->n_control))
		goto expire_later;

	/* Unlink conn if not referenced anymore */
	if (likely(ip_vs_conn_unlink(cp))) {
		/* delete the timer if it is activated by other users */
		del_timer(&cp->timer);

		/* does anybody control me? */
		if (cp->control)
			ip_vs_control_del(cp);

		if (cp->flags & IP_VS_CONN_F_NFCT) {
			/* Do not access conntracks during subsys cleanup
			 * because nf_conntrack_find_get can not be used after
			 * conntrack cleanup for the net.
			 */
			smp_rmb();
			if (ipvs->enable)
				ip_vs_conn_drop_conntrack(cp);
		}

		if (unlikely(cp->app != NULL))
			ip_vs_unbind_app(cp);
		ip_vs_unbind_dest(cp);
		if (cp->flags & IP_VS_CONN_F_NO_CPORT)
			atomic_dec(&ip_vs_conn_no_cport_cnt);
		call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
		atomic_dec(&ipvs->conn_count);
		return;
	}

expire_later:
	IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
		  atomic_read(&cp->refcnt),
		  atomic_read(&cp->n_control));

	atomic_inc(&cp->refcnt);
	cp->timeout = 60*HZ;

	if (ipvs->sync_state & IP_VS_STATE_MASTER)
		ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));

	ip_vs_conn_put(cp);
}
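
/*
 * Note: when the connection is still referenced or still controls other
 * connections, expiry is retried after 60 seconds; the extra reference
 * taken above is dropped again by ip_vs_conn_put(), which also rearms
 * the timer.
 */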

/* Modify timer, so that it expires as soon as possible.
 * Can be called without reference only if under RCU lock.
 */
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
	/* Using mod_timer_pending will ensure the timer is not
	 * modified after the final del_timer in ip_vs_conn_expire.
	 */
	if (timer_pending(&cp->timer) &&
	    time_after(cp->timer.expires, jiffies))
		mod_timer_pending(&cp->timer, jiffies);
}


/*
 *	Create a new connection entry and hash it into the ip_vs_conn_tab
 */
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
	       const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
	       struct ip_vs_dest *dest, __u32 fwmark)
{
	struct ip_vs_conn *cp;
	struct netns_ipvs *ipvs = p->ipvs;
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
							   p->protocol);

	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
	if (cp == NULL) {
		IP_VS_ERR_RL("%s(): no memory\n", __func__);
		return NULL;
	}

	INIT_HLIST_NODE(&cp->c_list);
	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
	cp->ipvs = ipvs;
	cp->af = p->af;
	cp->daf = dest_af;
	cp->protocol = p->protocol;
	ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
	cp->cport = p->cport;
	/* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
	ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
		       &cp->vaddr, p->vaddr);
	cp->vport = p->vport;
	ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
	cp->dport = dport;
	cp->flags = flags;
	cp->fwmark = fwmark;
	if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
		ip_vs_pe_get(p->pe);
		cp->pe = p->pe;
		cp->pe_data = p->pe_data;
		cp->pe_data_len = p->pe_data_len;
	} else {
		cp->pe = NULL;
		cp->pe_data = NULL;
		cp->pe_data_len = 0;
	}
	spin_lock_init(&cp->lock);

	/*
	 * Mark the entry as referenced by the current thread before hashing
	 * it into the table, so that other threads running
	 * ip_vs_random_dropentry cannot drop this entry.
	 */
	atomic_set(&cp->refcnt, 1);

	cp->control = NULL;
	atomic_set(&cp->n_control, 0);
	atomic_set(&cp->in_pkts, 0);

	cp->packet_xmit = NULL;
	cp->app = NULL;
	cp->app_data = NULL;
	/* reset struct ip_vs_seq */
	cp->in_seq.delta = 0;
	cp->out_seq.delta = 0;

	atomic_inc(&ipvs->conn_count);
	if (flags & IP_VS_CONN_F_NO_CPORT)
		atomic_inc(&ip_vs_conn_no_cport_cnt);

	/* Bind the connection with a destination server */
	cp->dest = NULL;
	ip_vs_bind_dest(cp, dest);

	/* Set its state and timeout */
	cp->state = 0;
	cp->old_state = 0;
	cp->timeout = 3*HZ;
	cp->sync_endtime = jiffies & ~3UL;

	/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
	if (p->af == AF_INET6)
		ip_vs_bind_xmit_v6(cp);
	else
#endif
		ip_vs_bind_xmit(cp);

	if (unlikely(pd && atomic_read(&pd->appcnt)))
		ip_vs_bind_app(cp, pd->pp);

	/*
	 * Allow conntrack to be preserved. By default, conntrack
	 * is created and destroyed for every packet.
	 * Sometimes keeping conntrack can be useful for
	 * IP_VS_CONN_F_ONE_PACKET too.
	 */

	if (ip_vs_conntrack_enabled(ipvs))
		cp->flags |= IP_VS_CONN_F_NFCT;

	/* Hash it in the ip_vs_conn_tab finally */
	ip_vs_conn_hash(cp);

	return cp;
}
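
/*
 * Note: one-packet connections (IP_VS_CONN_F_ONE_PACKET, UDP only) are
 * never hashed -- ip_vs_conn_hash() bails out early for them -- so every
 * such packet is scheduled independently and ip_vs_conn_put() expires the
 * entry immediately (timeout 0).
 */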

/*
 *	/proc/net/ip_vs_conn entries
 */
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
	struct seq_net_private	p;
	struct hlist_head	*l;
};

static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
	int idx;
	struct ip_vs_conn *cp;
	struct ip_vs_iter_state *iter = seq->private;

	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			/* __ip_vs_conn_get() is not needed by
			 * ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
			 */
			if (pos-- == 0) {
				iter->l = &ip_vs_conn_tab[idx];
				return cp;
			}
		}
		cond_resched_rcu();
	}

	return NULL;
}

static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip_vs_iter_state *iter = seq->private;

	iter->l = NULL;
	rcu_read_lock();
	return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_conn *cp = v;
	struct ip_vs_iter_state *iter = seq->private;
	struct hlist_node *e;
	struct hlist_head *l = iter->l;
	int idx;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_conn_array(seq, 0);

	/* more on same hash chain? */
	e = rcu_dereference(hlist_next_rcu(&cp->c_list));
	if (e)
		return hlist_entry(e, struct ip_vs_conn, c_list);

	idx = l - ip_vs_conn_tab;
	while (++idx < ip_vs_conn_tab_size) {
		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			iter->l = &ip_vs_conn_tab[idx];
			return cp;
		}
		cond_resched_rcu();
	}
	iter->l = NULL;
	return NULL;
}

static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
   "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
	else {
		const struct ip_vs_conn *cp = v;
		struct net *net = seq_file_net(seq);
		char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
		size_t len = 0;
		char dbuf[IP_VS_ADDRSTRLEN];

		if (!net_eq(cp->ipvs->net, net))
			return 0;
		if (cp->pe_data) {
			pe_data[0] = ' ';
			len = strlen(cp->pe->name);
			memcpy(pe_data + 1, cp->pe->name, len);
			pe_data[len + 1] = ' ';
			len += 2;
			len += cp->pe->show_pe_data(cp, pe_data + len);
		}
		pe_data[len] = '\0';

#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
		else
#endif
			snprintf(dbuf, sizeof(dbuf), "%08X",
				 ntohl(cp->daddr.ip));

#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
				   "%s %04X %-11s %7lu%s\n",
				   ip_vs_proto_name(cp->protocol),
				   &cp->caddr.in6, ntohs(cp->cport),
				   &cp->vaddr.in6, ntohs(cp->vport),
				   dbuf, ntohs(cp->dport),
				   ip_vs_state_name(cp->protocol, cp->state),
				   (cp->timer.expires-jiffies)/HZ, pe_data);
		else
#endif
			seq_printf(seq,
				   "%-3s %08X %04X %08X %04X"
				   " %s %04X %-11s %7lu%s\n",
				   ip_vs_proto_name(cp->protocol),
				   ntohl(cp->caddr.ip), ntohs(cp->cport),
				   ntohl(cp->vaddr.ip), ntohs(cp->vport),
				   dbuf, ntohs(cp->dport),
				   ip_vs_state_name(cp->protocol, cp->state),
				   (cp->timer.expires-jiffies)/HZ, pe_data);
	}
	return 0;
}

static const struct seq_operations ip_vs_conn_seq_ops = {
	.start = ip_vs_conn_seq_start,
	.next  = ip_vs_conn_seq_next,
	.stop  = ip_vs_conn_seq_stop,
	.show  = ip_vs_conn_seq_show,
};

static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
			    sizeof(struct ip_vs_iter_state));
}

static const struct file_operations ip_vs_conn_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip_vs_conn_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const char *ip_vs_origin_name(unsigned int flags)
{
	if (flags & IP_VS_CONN_F_SYNC)
		return "SYNC";
	else
		return "LOCAL";
}

static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
	char dbuf[IP_VS_ADDRSTRLEN];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
   "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
	else {
		const struct ip_vs_conn *cp = v;
		struct net *net = seq_file_net(seq);

		if (!net_eq(cp->ipvs->net, net))
			return 0;

#ifdef CONFIG_IP_VS_IPV6
		if (cp->daf == AF_INET6)
			snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
		else
#endif
			snprintf(dbuf, sizeof(dbuf), "%08X",
				 ntohl(cp->daddr.ip));

#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
				   "%s %04X %-11s %-6s %7lu\n",
				   ip_vs_proto_name(cp->protocol),
				   &cp->caddr.in6, ntohs(cp->cport),
				   &cp->vaddr.in6, ntohs(cp->vport),
				   dbuf, ntohs(cp->dport),
				   ip_vs_state_name(cp->protocol, cp->state),
				   ip_vs_origin_name(cp->flags),
				   (cp->timer.expires-jiffies)/HZ);
		else
#endif
			seq_printf(seq,
				   "%-3s %08X %04X %08X %04X "
				   "%s %04X %-11s %-6s %7lu\n",
				   ip_vs_proto_name(cp->protocol),
				   ntohl(cp->caddr.ip), ntohs(cp->cport),
				   ntohl(cp->vaddr.ip), ntohs(cp->vport),
				   dbuf, ntohs(cp->dport),
				   ip_vs_state_name(cp->protocol, cp->state),
				   ip_vs_origin_name(cp->flags),
				   (cp->timer.expires-jiffies)/HZ);
	}
	return 0;
}

static const struct seq_operations ip_vs_conn_sync_seq_ops = {
	.start = ip_vs_conn_seq_start,
	.next  = ip_vs_conn_seq_next,
	.stop  = ip_vs_conn_seq_stop,
	.show  = ip_vs_conn_sync_seq_show,
};

static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
			    sizeof(struct ip_vs_iter_state));
}

static const struct file_operations ip_vs_conn_sync_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip_vs_conn_sync_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

#endif


/*
 *	Randomly drop connection entries before running out of memory
 */
static inline int todrop_entry(struct ip_vs_conn *cp)
{
	/*
	 * The drop rate array needs tuning for real environments.
	 * Called from timer bh only => no locking
	 */
	static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
	static char todrop_counter[9] = {0};
	int i;

	/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
	   This will leave enough time for normal connections to get
	   through. */
	if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
		return 0;

	/* Don't drop the entry if its number of incoming packets is not
	   in [0, 8] */
	i = atomic_read(&cp->in_pkts);
	if (i > 8 || i < 0)
		return 0;

	if (!todrop_rate[i])
		return 0;
	if (--todrop_counter[i] > 0)
		return 0;

	todrop_counter[i] = todrop_rate[i];
	return 1;
}
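
/*
 * Example: an established connection with i == 3 incoming packets is
 * dropped roughly once every 3 calls (todrop_rate[3] == 3 refills
 * todrop_counter[3]); entries with 0 or more than 8 packets are never
 * dropped here.
 */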

/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
{
	int idx;
	struct ip_vs_conn *cp, *cp_c;

	rcu_read_lock();
	/*
	 * Randomly scan 1/32 of the whole table every second
	 */
	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
		unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;

		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
				/* connection template */
				continue;
			if (cp->ipvs != ipvs)
				continue;
			if (cp->protocol == IPPROTO_TCP) {
				switch(cp->state) {
				case IP_VS_TCP_S_SYN_RECV:
				case IP_VS_TCP_S_SYNACK:
					break;

				case IP_VS_TCP_S_ESTABLISHED:
					if (todrop_entry(cp))
						break;
					continue;

				default:
					continue;
				}
			} else if (cp->protocol == IPPROTO_SCTP) {
				switch (cp->state) {
				case IP_VS_SCTP_S_INIT1:
				case IP_VS_SCTP_S_INIT:
					break;
				case IP_VS_SCTP_S_ESTABLISHED:
					if (todrop_entry(cp))
						break;
					continue;
				default:
					continue;
				}
			} else {
				if (!todrop_entry(cp))
					continue;
			}

			IP_VS_DBG(4, "del connection\n");
			ip_vs_conn_expire_now(cp);
			cp_c = cp->control;
			/* cp->control is valid only with reference to cp */
			if (cp_c && __ip_vs_conn_get(cp)) {
				IP_VS_DBG(4, "del conn template\n");
				ip_vs_conn_expire_now(cp_c);
				__ip_vs_conn_put(cp);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}


/*
 *	Flush all the connection entries in the ip_vs_conn_tab
 */
static void ip_vs_conn_flush(struct net *net)
{
	int idx;
	struct ip_vs_conn *cp, *cp_c;
	struct netns_ipvs *ipvs = net_ipvs(net);

flush_again:
	rcu_read_lock();
	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {

		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
			if (cp->ipvs != ipvs)
				continue;
			IP_VS_DBG(4, "del connection\n");
			ip_vs_conn_expire_now(cp);
			cp_c = cp->control;
			/* cp->control is valid only with reference to cp */
			if (cp_c && __ip_vs_conn_get(cp)) {
				IP_VS_DBG(4, "del conn template\n");
				ip_vs_conn_expire_now(cp_c);
				__ip_vs_conn_put(cp);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();

	/* the count may be non-zero, because some conn entries may still
	   be handled by slow timer handlers or are unhashed but still
	   referenced */
	if (atomic_read(&ipvs->conn_count) != 0) {
		schedule();
		goto flush_again;
	}
}
/*
 * per netns init and exit
 */
int __net_init ip_vs_conn_net_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	atomic_set(&ipvs->conn_count, 0);

	proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
	proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
	return 0;
}

void __net_exit ip_vs_conn_net_cleanup(struct net *net)
{
	/* flush all the connection entries first */
	ip_vs_conn_flush(net);
	remove_proc_entry("ip_vs_conn", net->proc_net);
	remove_proc_entry("ip_vs_conn_sync", net->proc_net);
}

int __init ip_vs_conn_init(void)
{
	int idx;

	/* Compute size and mask */
	ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
	ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;

	/*
	 * Allocate the connection hash table and initialize its list heads
	 */
	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
	if (!ip_vs_conn_tab)
		return -ENOMEM;

	/* Allocate ip_vs_conn slab cache */
	ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
					      sizeof(struct ip_vs_conn), 0,
					      SLAB_HWCACHE_ALIGN, NULL);
	if (!ip_vs_conn_cachep) {
		vfree(ip_vs_conn_tab);
		return -ENOMEM;
	}

	/* report the table size in terms of its actual hlist_head entries */
	pr_info("Connection hash table configured "
		"(size=%d, memory=%ldKbytes)\n",
		ip_vs_conn_tab_size,
		(long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
	IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
		  sizeof(struct ip_vs_conn));

	for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
		INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);

	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
		spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
	}

	/* calculate the random value for connection hash */
	get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));

	return 0;
}
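
/*
 * Example: with the default conn_tab_bits == 12 the table has 1 << 12 ==
 * 4096 buckets and mask 0xfff; each bucket is a single-pointer hlist_head,
 * so the table itself costs 32 Kbytes on a 64-bit machine.
 */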

void ip_vs_conn_cleanup(void)
{
	/* Wait all ip_vs_conn_rcu_free() callbacks to complete */
	rcu_barrier();
	/* Release the empty cache */
	kmem_cache_destroy(ip_vs_conn_cachep);
	vfree(ip_vs_conn_tab);
}