net/rxrpc/local_object.c
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

static DEFINE_MUTEX(rxrpc_local_mutex);
static LIST_HEAD(rxrpc_local_endpoints);

/*
 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services. Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
	default:
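		/* Only AF_INET addresses get this far; other address families
		 * are rejected in rxrpc_lookup_local() before the comparison
		 * is reached.
		 */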
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		INIT_HLIST_HEAD(&local->services);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d}", local, local->srx.transport_type);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
			       IPPROTO_UDP, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	if (srx->transport.family == AF_INET) {
		_enter("{%d,%u,%pI4+%hu}",
		       srx->transport_type,
		       srx->transport.family,
		       &srx->transport.sin.sin_addr,
		       ntohs(srx->transport.sin.sin_port));
	} else {
		_enter("{%d,%u}",
		       srx->transport_type,
		       srx->transport.family);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	mutex_lock(&rxrpc_local_mutex);

	for (cursor = rxrpc_local_endpoints.next;
	     cursor != &rxrpc_local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here. It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match. We replace a dying object. Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local);
	if (ret < 0)
		goto sock_error;

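	/* Insert the new endpoint at the cursor so that the list stays sorted
	 * by the comparison key.
	 */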
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxrpc_local_mutex);

	_net("LOCAL %s %d {%d,%u,%pI4+%hu}",
	     age,
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxrpc_local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxrpc_local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * A local endpoint reached its end of life.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
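	/* Closing the socket may sleep, so defer the destruction to the work
	 * item rather than doing it here.
	 */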
	rxrpc_queue_work(&local->processor);
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxrpc_local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxrpc_local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(hlist_empty(&local->services));

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	_enter("%d", local->debug_id);

	do {
		again = false;
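		/* Once the usage count has dropped to zero, the endpoint is
		 * torn down; the destroyer closes the socket and frees the
		 * record via RCU.
		 */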
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void __exit rxrpc_destroy_all_locals(void)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxrpc_local_endpoints)) {
		mutex_lock(&rxrpc_local_mutex);
		list_for_each_entry(local, &rxrpc_local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxrpc_local_mutex);
		BUG();
	}

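	/* Wait for any outstanding rxrpc_local_rcu() callbacks to run before
	 * the module goes away.
	 */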
	rcu_barrier();
}