Commit | Line | Data |
---|---|---|
77d8bf9c ACM |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Generic INET transport hashtables | |
7 | * | |
8 | * Authors: Lotsa people, from code originally in tcp | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * as published by the Free Software Foundation; either version | |
13 | * 2 of the License, or (at your option) any later version. | |
14 | */ | |
15 | ||
2d8c4ce5 | 16 | #include <linux/module.h> |
a7f5e7f1 | 17 | #include <linux/random.h> |
f3f05f70 | 18 | #include <linux/sched.h> |
77d8bf9c | 19 | #include <linux/slab.h> |
f3f05f70 | 20 | #include <linux/wait.h> |
095dc8e0 | 21 | #include <linux/vmalloc.h> |
77d8bf9c | 22 | |
463c84b9 | 23 | #include <net/inet_connection_sock.h> |
77d8bf9c | 24 | #include <net/inet_hashtables.h> |
6e5714ea | 25 | #include <net/secure_seq.h> |
a7f5e7f1 | 26 | #include <net/ip.h> |
77d8bf9c | 27 | |
6eada011 ED |
28 | static u32 inet_ehashfn(const struct net *net, const __be32 laddr, |
29 | const __u16 lport, const __be32 faddr, | |
30 | const __be16 fport) | |
65cd8033 | 31 | { |
1bbdceef HFS |
32 | static u32 inet_ehash_secret __read_mostly; |
33 | ||
34 | net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret)); | |
35 | ||
65cd8033 HFS |
36 | return __inet_ehashfn(laddr, lport, faddr, fport, |
37 | inet_ehash_secret + net_hash_mix(net)); | |
38 | } | |
39 | ||
d1e559d0 ED |
40 | /* This function handles inet_sock, but also timewait and request sockets |
41 | * for IPv4/IPv6. | |
42 | */ | |
5b441f76 | 43 | u32 sk_ehashfn(const struct sock *sk) |
65cd8033 | 44 | { |
d1e559d0 ED |
45 | #if IS_ENABLED(CONFIG_IPV6) |
46 | if (sk->sk_family == AF_INET6 && | |
47 | !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) | |
48 | return inet6_ehashfn(sock_net(sk), | |
49 | &sk->sk_v6_rcv_saddr, sk->sk_num, | |
50 | &sk->sk_v6_daddr, sk->sk_dport); | |
51 | #endif | |
5b441f76 ED |
52 | return inet_ehashfn(sock_net(sk), |
53 | sk->sk_rcv_saddr, sk->sk_num, | |
54 | sk->sk_daddr, sk->sk_dport); | |
65cd8033 HFS |
55 | } |
56 | ||
77d8bf9c ACM |
57 | /* |
58 | * Allocate and initialize a new local port bind bucket. | |
59 | * The bindhash mutex for snum's hash chain must be held here. | |
60 | */ | |
e18b890b | 61 | struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, |
941b1d22 | 62 | struct net *net, |
77d8bf9c ACM |
63 | struct inet_bind_hashbucket *head, |
64 | const unsigned short snum) | |
65 | { | |
54e6ecb2 | 66 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); |
77d8bf9c | 67 | |
00db4124 | 68 | if (tb) { |
efd7ef1c | 69 | write_pnet(&tb->ib_net, net); |
77d8bf9c ACM |
70 | tb->port = snum; |
71 | tb->fastreuse = 0; | |
da5e3630 | 72 | tb->fastreuseport = 0; |
a9d8f911 | 73 | tb->num_owners = 0; |
77d8bf9c ACM |
74 | INIT_HLIST_HEAD(&tb->owners); |
75 | hlist_add_head(&tb->node, &head->chain); | |
76 | } | |
77 | return tb; | |
78 | } | |
79 | ||
77d8bf9c ACM |
80 | /* |
81 | * Caller must hold hashbucket lock for this tb with local BH disabled | |
82 | */ | |
e18b890b | 83 | void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) |
77d8bf9c ACM |
84 | { |
85 | if (hlist_empty(&tb->owners)) { | |
86 | __hlist_del(&tb->node); | |
87 | kmem_cache_free(cachep, tb); | |
88 | } | |
89 | } | |
2d8c4ce5 ACM |
90 | |
91 | void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, | |
92 | const unsigned short snum) | |
93 | { | |
c720c7e8 | 94 | inet_sk(sk)->inet_num = snum; |
2d8c4ce5 | 95 | sk_add_bind_node(sk, &tb->owners); |
a9d8f911 | 96 | tb->num_owners++; |
463c84b9 | 97 | inet_csk(sk)->icsk_bind_hash = tb; |
2d8c4ce5 ACM |
98 | } |
99 | ||
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	/* bind-hash chain index for the port this socket currently owns */
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	/* BHs are already disabled by the caller (see inet_put_port()) */
	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);		/* drop out of tb->owners */
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	/* frees tb if this socket was its last owner */
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}
120 | ||
/* Public wrapper for __inet_put_port(): release the local port bound
 * to @sk.  Bottom halves are disabled around the call because the bind
 * hash bucket lock is a plain spin_lock also taken in BH context.
 */
void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
f3f05f70 | 128 | |
/* Make an accepted child socket @child inherit a reference on its local
 * port from listener @sk.  Normally the listener's own bind bucket is
 * reused; with tproxy redirection the child may carry a different port,
 * in which case a matching bucket is looked up or created.
 * Returns 0 on success, -ENOENT if the listener lost its bind bucket,
 * -ENOMEM if a needed bucket could not be allocated.
 */
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	/* the child's port, which may differ from the listener's (tproxy) */
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			/* no existing bucket for this port in this netns */
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
170 | ||
c25eb3bf ED |
171 | static inline int compute_score(struct sock *sk, struct net *net, |
172 | const unsigned short hnum, const __be32 daddr, | |
173 | const int dif) | |
174 | { | |
175 | int score = -1; | |
176 | struct inet_sock *inet = inet_sk(sk); | |
177 | ||
c720c7e8 | 178 | if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && |
c25eb3bf | 179 | !ipv6_only_sock(sk)) { |
c720c7e8 | 180 | __be32 rcv_saddr = inet->inet_rcv_saddr; |
da5e3630 | 181 | score = sk->sk_family == PF_INET ? 2 : 1; |
c25eb3bf ED |
182 | if (rcv_saddr) { |
183 | if (rcv_saddr != daddr) | |
184 | return -1; | |
da5e3630 | 185 | score += 4; |
c25eb3bf ED |
186 | } |
187 | if (sk->sk_bound_dev_if) { | |
188 | if (sk->sk_bound_dev_if != dif) | |
189 | return -1; | |
da5e3630 | 190 | score += 4; |
c25eb3bf | 191 | } |
70da268b ED |
192 | if (sk->sk_incoming_cpu == raw_smp_processor_id()) |
193 | score++; | |
c25eb3bf ED |
194 | } |
195 | return score; | |
196 | } | |
197 | ||
/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */


/* Lockless (RCU) lookup of the best listening socket for a packet to
 * (daddr, hnum) on device @dif.  Among equal-score SO_REUSEPORT
 * listeners one is picked pseudo-randomly from the flow hash.
 * Returns a referenced socket or NULL.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	/* reuseport state: matches counts equal-score SO_REUSEPORT sockets,
	 * phash is the flow hash used to choose among them */
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			/* strictly better candidate: restart reuseport group */
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			/* reservoir-style uniform pick among equal scorers */
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		/* take a reference, then re-validate: the socket may have
		 * changed identity between scoring and refcounting */
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
a7f5e7f1 | 261 | |
05dbc7b5 ED |
262 | /* All sockets share common refcount, but have different destructors */ |
263 | void sock_gen_put(struct sock *sk) | |
264 | { | |
265 | if (!atomic_dec_and_test(&sk->sk_refcnt)) | |
266 | return; | |
267 | ||
268 | if (sk->sk_state == TCP_TIME_WAIT) | |
269 | inet_twsk_free(inet_twsk(sk)); | |
41b822c5 ED |
270 | else if (sk->sk_state == TCP_NEW_SYN_RECV) |
271 | reqsk_free(inet_reqsk(sk)); | |
05dbc7b5 ED |
272 | else |
273 | sk_free(sk); | |
274 | } | |
275 | EXPORT_SYMBOL_GPL(sock_gen_put); | |
276 | ||
/* skb destructor used by early demux: drop the socket reference that
 * was attached to the skb, honoring the state-specific destructor.
 */
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
282 | ||
/* Lockless (RCU) lookup in the established hash for the connection
 * matching the exact 4-tuple (saddr:sport -> daddr:hnum) on @dif.
 * Returns a referenced socket or NULL.
 */
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		/* cheap hash filter before the full tuple compare */
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			/* re-check under the reference: the slot may have
			 * been reused for another flow meanwhile */
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
331 | ||
/* called with local bh disabled */
/* Verify that @sk's 4-tuple (with local port @lport) is unique in the
 * established hash and, if so, insert @sk there.  A conflicting
 * TIME_WAIT entry may be recycled when twsk_unique() allows it; the
 * timewait sock is then handed to the caller via *twp or descheduled
 * here.  Returns 0 on success, -EADDRNOTAVAIL if the tuple is in use.
 */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	/* note the swap: we search from the peer's point of view */
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				/* a recyclable TIME_WAIT ends the search */
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		/* evict the recycled TIME_WAIT entry from the chain */
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		/* caller takes ownership of the timewait sock */
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}
397 | ||
e2baad9e | 398 | static u32 inet_sk_port_offset(const struct sock *sk) |
a7f5e7f1 ACM |
399 | { |
400 | const struct inet_sock *inet = inet_sk(sk); | |
e2baad9e | 401 | |
c720c7e8 ED |
402 | return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, |
403 | inet->inet_daddr, | |
404 | inet->inet_dport); | |
a7f5e7f1 ACM |
405 | } |
406 | ||
/* insert a socket into ehash, and eventually remove another one
 * (The another one can be a SYN_RECV or TIMEWAIT
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	/* compute the hash before taking the bucket lock */
	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		/* both sockets hash the same 4-tuple, hence same bucket */
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		/* only proceed if the old socket was still hashed;
		 * otherwise someone else already replaced it */
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}
435 | ||
5e0724d0 | 436 | bool inet_ehash_nolisten(struct sock *sk, struct sock *osk) |
079096f1 | 437 | { |
5e0724d0 ED |
438 | bool ok = inet_ehash_insert(sk, osk); |
439 | ||
440 | if (ok) { | |
441 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | |
442 | } else { | |
443 | percpu_counter_inc(sk->sk_prot->orphan_count); | |
444 | sk->sk_state = TCP_CLOSE; | |
445 | sock_set_flag(sk, SOCK_DEAD); | |
446 | inet_csk_destroy_sock(sk); | |
447 | } | |
448 | return ok; | |
152da81d | 449 | } |
5e0724d0 | 450 | EXPORT_SYMBOL_GPL(inet_ehash_nolisten); |
152da81d | 451 | |
/* Hash @sk into the proper table: listeners go into the listening
 * hash under its bucket lock, everything else into the established
 * hash via inet_ehash_nolisten() (optionally replacing @osk).
 * Callers disable BHs (see inet_hash()).
 */
void __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}
EXPORT_SYMBOL(__inet_hash);
ab1e0a13 ACM |
470 | |
471 | void inet_hash(struct sock *sk) | |
472 | { | |
473 | if (sk->sk_state != TCP_CLOSE) { | |
474 | local_bh_disable(); | |
77a6a471 | 475 | __inet_hash(sk, NULL); |
ab1e0a13 ACM |
476 | local_bh_enable(); |
477 | } | |
478 | } | |
479 | EXPORT_SYMBOL_GPL(inet_hash); | |
480 | ||
/* Remove @sk from whichever hash table it lives in (listening hash for
 * TCP_LISTEN sockets, established hash otherwise), adjusting the
 * protocol inuse counter if the socket was actually unlinked.
 */
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	/* pick the lock matching the table the socket is hashed in */
	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
152da81d | 502 | |
/* Bind @sk to a local port for connect() and hash it into the
 * established table.  If the socket already has a port (snum != 0)
 * only 4-tuple uniqueness is verified via @check_established;
 * otherwise the local port range is scanned for a usable ephemeral
 * port, starting at an offset derived from @port_offset.
 * Returns 0 on success, -EADDRNOTAVAIL if no port/tuple is available.
 */
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		/* search hint shared by all ephemeral-port searches,
		 * advanced on every successful bind below */
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		/* By starting with offset being an even number,
		 * we tend to leave about 50% of ports for other uses,
		 * like bind(0).
		 */
		offset &= ~1;

		local_bh_disable();
		for (i = 0; i < remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_local_reserved_port(net, port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					/* fastreuse >= 0 means the port was
					 * taken via bind(); skip it */
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			/* port unused so far: create its bind bucket */
			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			/* -1 marks buckets created by connect(), see above */
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		/* keep the hint even so the next search also starts even */
		hint += (i + 2) & ~1;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			/* tw (if any) is replaced by sk in the ehash */
			inet_ehash_nolisten(sk, (struct sock *)tw);
		}
		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw)
			inet_twsk_deschedule_put(tw);

		ret = 0;
		goto out;
	}

	/* socket already bound to a port: just check tuple uniqueness */
	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		/* sole owner of the port: no conflict possible */
		inet_ehash_nolisten(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
5ee31fc1 PE |
610 | |
611 | /* | |
612 | * Bind a port for a connect operation and hash it. | |
613 | */ | |
614 | int inet_hash_connect(struct inet_timewait_death_row *death_row, | |
615 | struct sock *sk) | |
616 | { | |
e2baad9e ED |
617 | u32 port_offset = 0; |
618 | ||
619 | if (!inet_sk(sk)->inet_num) | |
620 | port_offset = inet_sk_port_offset(sk); | |
621 | return __inet_hash_connect(death_row, sk, port_offset, | |
b4d6444e | 622 | __inet_check_established); |
5ee31fc1 | 623 | } |
a7f5e7f1 | 624 | EXPORT_SYMBOL_GPL(inet_hash_connect); |
5caea4ea ED |
625 | |
626 | void inet_hashinfo_init(struct inet_hashinfo *h) | |
627 | { | |
628 | int i; | |
629 | ||
c25eb3bf | 630 | for (i = 0; i < INET_LHTABLE_SIZE; i++) { |
5caea4ea | 631 | spin_lock_init(&h->listening_hash[i].lock); |
c25eb3bf ED |
632 | INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, |
633 | i + LISTENING_NULLS_BASE); | |
634 | } | |
5caea4ea | 635 | } |
5caea4ea | 636 | EXPORT_SYMBOL_GPL(inet_hashinfo_init); |

/* Allocate the array of spinlocks protecting the established-hash
 * chains.  The lock count scales with possible CPUs, is rounded up to
 * a power of two and capped at the bucket count so ehash_locks_mask
 * can be used for indexing.  Returns 0 on success, -ENOMEM on failure.
 */
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	/* sizeof(spinlock_t) can be 0 (lockless configs); then a single
	 * logical "lock" (mask 0) suffices and nothing is allocated */
	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		/* try kmalloc first, fall back to vmalloc for large arrays */
		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	/* nblocks is a power of two, so mask = nblocks - 1 works */
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);