/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
static DEFINE_SPINLOCK(rxrpc_peer_hash_lock);

/*
 * Hash a peer key.
 */
static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        const u16 *p;
        unsigned int i, size;
        unsigned long hash_key;

        _enter("");

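        /* Divide the local endpoint pointer by its alignment to discard the
         * low-order bits that are always zero, then fold in the transport
         * parameters.
         */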
        hash_key = (unsigned long)local / __alignof__(*local);
        hash_key += srx->transport_type;
        hash_key += srx->transport_len;
        hash_key += srx->transport.family;

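        /* Only IPv4 transport addresses are handled here as yet; anything
         * else trips the WARN below and yields a hash key of 0.
         */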
        switch (srx->transport.family) {
        case AF_INET:
                hash_key += (u16 __force)srx->transport.sin.sin_port;
                size = sizeof(srx->transport.sin.sin_addr);
                p = (u16 *)&srx->transport.sin.sin_addr;
                break;
        default:
                WARN(1, "AF_RXRPC: Unsupported transport address family\n");
                return 0;
        }

        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0; i < size; i += sizeof(*p), p++)
                hash_key += *p;

        _leave(" 0x%lx", hash_key);
        return hash_key;
}

/*
 * Compare a peer to a key.  Return -ve, 0 or +ve to indicate less than, same
 * or greater than.
 *
 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
 * buckets and mid-bucket insertion, so we don't make full use of this
 * information at this point.
 */
static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
                               struct rxrpc_local *local,
                               const struct sockaddr_rxrpc *srx,
                               unsigned long hash_key)
{
        long diff;

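        /* Compare the cheap scalar fields first; only if they all match do we
         * go on to compare the transport address itself.
         */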
        diff = ((peer->hash_key - hash_key) ?:
                ((unsigned long)peer->local - (unsigned long)local) ?:
                (peer->srx.transport_type - srx->transport_type) ?:
                (peer->srx.transport_len - srx->transport_len) ?:
                (peer->srx.transport.family - srx->transport.family));
        if (diff != 0)
                return diff;

        switch (srx->transport.family) {
        case AF_INET:
                return ((u16 __force)peer->srx.transport.sin.sin_port -
                        (u16 __force)srx->transport.sin.sin_port) ?:
                        memcmp(&peer->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
        default:
                BUG();
        }
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_local *local,
        const struct sockaddr_rxrpc *srx,
        unsigned long hash_key)
{
        struct rxrpc_peer *peer;

        hash_for_each_possible_rcu(rxrpc_peer_hash, peer, hash_link, hash_key) {
                if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
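                        /* A usage count of zero means the peer is dead and
                         * merely awaiting RCU reclamation, so treat it as
                         * absent.
                         */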
                        if (atomic_read(&peer->usage) == 0)
                                return NULL;
                        return peer;
                }
        }

        return NULL;
}

/*
 * Look up a remote transport endpoint for the specified address using RCU.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
                                         const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_peer *peer;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer) {
                switch (srx->transport.family) {
                case AF_INET:
                        _net("PEER %d {%d,%u,%pI4+%hu}",
                             peer->debug_id,
                             peer->srx.transport_type,
                             peer->srx.transport.family,
                             &peer->srx.transport.sin.sin_addr,
                             ntohs(peer->srx.transport.sin.sin_port));
                        break;
                }

                _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        }
        return peer;
}

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
        struct rtable *rt;
        struct flowi4 fl4;

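        /* Assume a standard Ethernet MTU to begin with; this also serves as
         * the fallback if no route to the peer can be found.
         */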
        peer->if_mtu = 1500;

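        /* Do a trial route lookup towards the peer; only the MTU of the
         * resulting egress route is of interest here.
         */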
        rt = ip_route_output_ports(&init_net, &fl4, NULL,
                                   peer->srx.transport.sin.sin_addr.s_addr, 0,
                                   htons(7000), htons(7001),
                                   IPPROTO_UDP, 0, 0);
        if (IS_ERR(rt)) {
                _leave(" [route err %ld]", PTR_ERR(rt));
                return;
        }

        peer->if_mtu = dst_mtu(&rt->dst);
        dst_release(&rt->dst);

        _leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * Allocate a peer.
 */
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
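                /* The caller gets the initial reference. */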
                atomic_set(&peer->usage, 1);
                peer->local = local;
                INIT_HLIST_HEAD(&peer->error_targets);
                INIT_WORK(&peer->error_distributor,
                          &rxrpc_peer_error_distributor);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
        }

        _leave(" = %p", peer);
        return peer;
}

/*
 * Initialise peer record.
 */
static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
{
        peer->hash_key = hash_key;
        rxrpc_assess_MTU_size(peer);
        peer->mtu = peer->if_mtu;

        if (peer->srx.transport.family == AF_INET) {
                peer->hdrsize = sizeof(struct iphdr);
                switch (peer->srx.transport_type) {
                case SOCK_DGRAM:
                        peer->hdrsize += sizeof(struct udphdr);
                        break;
                default:
                        BUG();
                        break;
                }
        } else {
                BUG();
        }

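        /* maxdata is the payload space left in each packet once the IP, UDP
         * and RxRPC wire headers have been accounted for.
         */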
        peer->hdrsize += sizeof(struct rxrpc_wire_header);
        peer->maxdata = peer->mtu - peer->hdrsize;
}

/*
 * Set up a new peer.
 */
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = rxrpc_alloc_peer(local, gfp);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
                rxrpc_init_peer(peer, hash_key);
        }

        _leave(" = %p", peer);
        return peer;
}

/*
 * Set up a new incoming peer.  The address is prestored in the preallocated
 * peer.
 */
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
                                              struct rxrpc_peer *prealloc)
{
        struct rxrpc_peer *peer;
        unsigned long hash_key;

        hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
        prealloc->local = local;
        rxrpc_init_peer(prealloc, hash_key);

        spin_lock(&rxrpc_peer_hash_lock);

        /* Need to check that we aren't racing with someone else */
        peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
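        /* If no live peer matched, publish the caller's preallocated record;
         * otherwise the prealloc is left untouched for the caller to reuse or
         * discard.
         */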
        if (!peer) {
                peer = prealloc;
                hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key);
        }

        spin_unlock(&rxrpc_peer_hash_lock);
        return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
{
        struct rxrpc_peer *peer, *candidate;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);

        _enter("{%d,%d,%pI4+%hu}",
               srx->transport_type,
               srx->transport_len,
               &srx->transport.sin.sin_addr,
               ntohs(srx->transport.sin.sin_port));

        /* search the peer list first */
        rcu_read_lock();
        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
        rcu_read_unlock();

        if (!peer) {
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
                candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
                }

                spin_lock_bh(&rxrpc_peer_hash_lock);

                /* Need to check that we aren't racing with someone else */
                peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
                if (peer && !rxrpc_get_peer_maybe(peer))
                        peer = NULL;
                if (!peer)
                        hash_add_rcu(rxrpc_peer_hash,
                                     &candidate->hash_link, hash_key);

                spin_unlock_bh(&rxrpc_peer_hash_lock);

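                /* If we lost the race, discard our candidate: it was never
                 * published in the hash table, so a plain kfree() with no RCU
                 * deferral is sufficient.
                 */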
                if (peer)
                        kfree(candidate);
                else
                        peer = candidate;
        }

        _net("PEER %d {%d,%pI4+%hu}",
             peer->debug_id,
             peer->srx.transport_type,
             &peer->srx.transport.sin.sin_addr,
             ntohs(peer->srx.transport.sin.sin_port));

        _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        return peer;
}

/*
 * Discard a ref on a remote peer record.
 */
void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
        ASSERT(hlist_empty(&peer->error_targets));

        spin_lock_bh(&rxrpc_peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
        spin_unlock_bh(&rxrpc_peer_hash_lock);

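        /* The peer is now unhashed, but lockless lookups may still be walking
         * the old bucket, so defer the actual free until after an RCU grace
         * period.
         */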
        kfree_rcu(peer, rcu);
}

/**
 * rxrpc_kernel_get_peer - Get the peer address of a call
 * @sock: The socket on which the call is in progress.
 * @call: The call to query
 * @_srx: Where to place the result
 *
 * Get the address of the remote peer in a call.
 */
void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
                           struct sockaddr_rxrpc *_srx)
{
        *_srx = call->peer->srx;
}
EXPORT_SYMBOL(rxrpc_kernel_get_peer);
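
/*
 * A minimal usage sketch for the accessor above (hypothetical caller; the
 * variable names are illustrative only):
 *
 *	struct sockaddr_rxrpc srx;
 *
 *	rxrpc_kernel_get_peer(rxrpc_socket, rxrpc_call, &srx);
 *
 * On return, srx holds a copy of the remote endpoint's transport address.
 */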