net/rxrpc/conn_client.c
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include "ar-internal.h"

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree. The
 * epoch is changed if this wraps.
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated. We will also need to retire connections from an old epoch.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
                                          gfp_t gfp)
{
        u32 epoch;
        int id;

        _enter("");

        idr_preload(gfp);
        spin_lock(&rxrpc_conn_id_lock);

        epoch = rxrpc_epoch;

        /* We could use idr_alloc_cyclic() here, but we really need to know
         * when the counter wraps so that we can advance the epoch.
         */
        if (rxrpc_client_conn_ids.cur == 0)
                rxrpc_client_conn_ids.cur = 1;
        id = idr_alloc(&rxrpc_client_conn_ids, conn,
                       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
        if (id < 0) {
                if (id != -ENOSPC)
                        goto error;
                id = idr_alloc(&rxrpc_client_conn_ids, conn,
                               1, 0x40000000, GFP_NOWAIT);
                if (id < 0)
                        goto error;
                epoch++;
                rxrpc_epoch = epoch;
        }
        rxrpc_client_conn_ids.cur = id + 1;

        spin_unlock(&rxrpc_conn_id_lock);
        idr_preload_end();

        conn->proto.epoch = epoch;
        conn->proto.cid = id << RXRPC_CIDSHIFT;
        set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
        _leave(" [CID %x:%x]", epoch, conn->proto.cid);
        return 0;

error:
        spin_unlock(&rxrpc_conn_id_lock);
        idr_preload_end();
        _leave(" = %d", id);
        return id;
}
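
/* A worked illustration of the wrap handling above (hypothetical numbers,
 * not part of the original file): once ID 0x3fffffff has been handed out,
 * .cur is 0x40000000, so the first idr_alloc() searches the empty window
 * [0x40000000, 0x40000000) and fails with -ENOSPC. The second idr_alloc()
 * then restarts from 1 and the epoch is advanced; reusing an ID is safe
 * because peers distinguish connections by the (epoch, CID) pair rather
 * than by the CID alone.
 */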

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
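        /* Only connections that actually obtained an ID carry the flag, so
         * one whose ID allocation failed is harmlessly skipped here.
         */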
        if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
                spin_lock(&rxrpc_conn_id_lock);
                idr_remove(&rxrpc_client_conn_ids,
                           conn->proto.cid >> RXRPC_CIDSHIFT);
                spin_unlock(&rxrpc_conn_id_lock);
        }
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
        struct rxrpc_connection *conn;
        int id;

        if (!idr_is_empty(&rxrpc_client_conn_ids)) {
                idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
                        pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
                               conn, atomic_read(&conn->usage));
                }
                BUG();
        }

        idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection. The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
        struct rxrpc_connection *conn;
        int ret;

        _enter("");

        conn = rxrpc_alloc_connection(gfp);
        if (!conn) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        conn->params = *cp;
        conn->out_clientflag = RXRPC_CLIENT_INITIATED;
        conn->state = RXRPC_CONN_CLIENT;

        ret = rxrpc_get_client_connection_id(conn, gfp);
        if (ret < 0)
                goto error_0;

        ret = rxrpc_init_client_conn_security(conn);
        if (ret < 0)
                goto error_1;

        ret = conn->security->prime_packet_security(conn);
        if (ret < 0)
                goto error_2;

        write_lock(&rxrpc_connection_lock);
        list_add_tail(&conn->link, &rxrpc_connections);
        write_unlock(&rxrpc_connection_lock);

        /* We steal the caller's peer ref. */
        cp->peer = NULL;
        rxrpc_get_local(conn->params.local);
        key_get(conn->params.key);

        _leave(" = %p", conn);
        return conn;

error_2:
        conn->security->clear(conn);
error_1:
        rxrpc_put_client_connection_id(conn);
error_0:
        kfree(conn);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}
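
/* A sketch of the reference-counting contract above (hypothetical caller,
 * not from the original file): the caller passes in a counted ref on
 * cp->peer, which on success is absorbed into conn->params and signalled
 * by cp->peer being cleared, whereas local and key get fresh refs here:
 *
 *      cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
 *      conn = rxrpc_alloc_client_connection(cp, gfp);
 *      if (IS_ERR(conn))
 *              rxrpc_put_peer(cp->peer);       // still ours on failure
 *
 * On failure cp->peer is left untouched, so the ref remains the caller's
 * to release.
 */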

/*
 * Find a connection for a call.
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
                       struct rxrpc_conn_parameters *cp,
                       struct sockaddr_rxrpc *srx,
                       gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate = NULL;
        struct rxrpc_local *local = cp->local;
        struct rb_node *p, **pp, *parent;
        long diff;
        int chan;

        DECLARE_WAITQUEUE(myself, current);

        _enter("{%d,%lx},", call->debug_id, call->user_call_ID);

        cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
        if (!cp->peer)
                return -ENOMEM;

        if (!cp->exclusive) {
                /* Search for an existing client connection unless this is
                 * going to be a connection that's used exclusively for a
                 * single call.
                 */
                _debug("search 1");
                spin_lock(&local->client_conns_lock);
                p = local->client_conns.rb_node;
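                /* The tree is ordered on peer pointer, then key pointer,
                 * then security level; the ?: chain below yields the first
                 * nonzero comparison, giving an arbitrary but stable order.
                 */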
                while (p) {
                        conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
                        diff = (cmp(peer) ?:
                                cmp(key) ?:
                                cmp(security_level));
                        if (diff < 0)
                                p = p->rb_left;
                        else if (diff > 0)
                                p = p->rb_right;
                        else
                                goto found_extant_conn;
                }
                spin_unlock(&local->client_conns_lock);
        }

        /* We didn't find a connection or we want an exclusive one. */
allocate_conn:
        _debug("get new conn");
        candidate = rxrpc_alloc_client_connection(cp, gfp);
        if (IS_ERR(candidate)) {
                /* The allocator returns an ERR_PTR, never NULL, and on
                 * failure it leaves the caller's ref on cp->peer in place.
                 */
                rxrpc_put_peer(cp->peer);
                cp->peer = NULL;
                _leave(" = %ld", PTR_ERR(candidate));
                return PTR_ERR(candidate);
        }

        if (cp->exclusive) {
                /* Assign the call on an exclusive connection to channel 0 and
                 * don't add the connection to the endpoint's shareable conn
                 * lookup tree.
                 */
                _debug("exclusive chan 0");
                conn = candidate;
                atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
                spin_lock(&conn->channel_lock);
                chan = 0;
                goto found_channel;
        }

        /* We need to redo the search before attempting to add a new connection
         * lest we race with someone else adding a conflicting instance.
         */
        _debug("search 2");
        spin_lock(&local->client_conns_lock);

        pp = &local->client_conns.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                conn = rb_entry(parent, struct rxrpc_connection, client_node);

                diff = (cmp(peer) ?:
                        cmp(key) ?:
                        cmp(security_level));
                if (diff < 0)
                        pp = &(*pp)->rb_left;
                else if (diff > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_conn;
        }

        /* The second search also failed; simply add the new connection with
         * the new call in channel 0. Note that we need to take the channel
         * lock before dropping the client conn lock.
         */
        _debug("new conn");
        set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
        rb_link_node(&candidate->client_node, parent, pp);
        rb_insert_color(&candidate->client_node, &local->client_conns);
attached:
        conn = candidate;
        candidate = NULL;

        atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
        spin_lock(&conn->channel_lock);
        spin_unlock(&local->client_conns_lock);
        chan = 0;

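        /* All attachment paths (exclusive conn, freshly inserted conn,
         * extant conn with a spare channel) converge below holding
         * conn->channel_lock with channel 'chan' reserved. The channel
         * number forms the bottom bits of the call's CID: conn->proto.cid
         * has those bits clear because the ID was shifted left by
         * RXRPC_CIDSHIFT when it was allocated.
         */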
found_channel:
        _debug("found chan");
        call->conn = conn;
        call->channel = chan;
        call->epoch = conn->proto.epoch;
        call->cid = conn->proto.cid | chan;
        call->call_id = ++conn->channels[chan].call_counter;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

        spin_unlock(&conn->channel_lock);
        rxrpc_put_peer(cp->peer);
        cp->peer = NULL;
        _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
        return 0;

        /* We found a potentially suitable connection already in existence. If
         * we can reuse it (i.e. its usage count hasn't been reduced to 0 by
         * the reaper), discard any candidate we may have allocated and try to
         * get a channel on this one; otherwise we have to replace it.
         */
found_extant_conn:
        _debug("found conn");
        if (!rxrpc_get_connection_maybe(conn)) {
                if (!candidate) {
                        /* The dying connection was found by the first search,
                         * before any candidate was allocated; go and allocate
                         * a replacement.
                         */
                        spin_unlock(&local->client_conns_lock);
                        goto allocate_conn;
                }
                set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
                rb_replace_node(&conn->client_node,
                                &candidate->client_node,
                                &local->client_conns);
                clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
                goto attached;
        }

        spin_unlock(&local->client_conns_lock);

        rxrpc_put_connection(candidate);

        if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
                if (!gfpflags_allow_blocking(gfp)) {
                        rxrpc_put_connection(conn);
                        /* Mirror the interrupted path: release the peer ref
                         * if the first search matched and we still hold it.
                         */
                        rxrpc_put_peer(cp->peer);
                        cp->peer = NULL;
                        _leave(" = -EAGAIN");
                        return -EAGAIN;
                }

                add_wait_queue(&conn->channel_wq, &myself);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (atomic_add_unless(&conn->avail_chans, -1, 0))
                                break;
                        if (signal_pending(current))
                                goto interrupted;
                        schedule();
                }
                remove_wait_queue(&conn->channel_wq, &myself);
                __set_current_state(TASK_RUNNING);
        }
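        /* Editorial note (not in the original file): avail_chans counts the
         * free channels. Attaching a call decrements it via
         * atomic_add_unless() so the count can never pass below zero, and
         * the disconnect path is expected to increment it again and wake
         * channel_wq once a channel is vacated.
         */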

        /* The connection allegedly now has a free channel and we can now
         * attach the call to it.
         */
        spin_lock(&conn->channel_lock);

        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan].call)
                        goto found_channel;
        BUG();

interrupted:
        remove_wait_queue(&conn->channel_wq, &myself);
        __set_current_state(TASK_RUNNING);
        rxrpc_put_connection(conn);
        rxrpc_put_peer(cp->peer);
        cp->peer = NULL;
        _leave(" = -ERESTARTSYS");
        return -ERESTARTSYS;
}
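
/* A sketch of how a caller might drive the function above (hypothetical,
 * not part of the original file): the call setup path fills in a struct
 * rxrpc_conn_parameters and lets rxrpc_connect_call() consume the peer
 * lookup:
 *
 *      struct rxrpc_conn_parameters cp;
 *
 *      memset(&cp, 0, sizeof(cp));     // clear padding; see the allocator
 *      cp.local = local;
 *      cp.key = key;
 *      cp.security_level = security_level;
 *      cp.exclusive = exclusive;
 *
 *      ret = rxrpc_connect_call(call, &cp, srx, gfp);
 *      // on return, cp.peer is NULL whether or not the call succeeded
 *
 * Every exit path releases or transfers the peer ref and clears cp.peer,
 * so the caller need not clean it up.
 */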

/*
 * Remove a client connection from the local endpoint's tree, thereby removing
 * it as a target for reuse for new client calls.
 */
void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_local *local = conn->params.local;

        spin_lock(&local->client_conns_lock);
        if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
                rb_erase(&conn->client_node, &local->client_conns);
        spin_unlock(&local->client_conns_lock);

        rxrpc_put_client_connection_id(conn);
}