net/rxrpc/conn_object.c
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
LIST_HEAD(rxrpc_connection_proc_list);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

static void rxrpc_destroy_connection(struct rcu_head *);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
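
/* Note: conn->usage is left at zero by the kzalloc() above; the caller is
 * expected to set the initial usage count once the connection is ready to
 * be published (the client and service connection setup paths are assumed
 * to do this).
 */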

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set. We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
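
/* A minimal caller sketch (an assumption, not part of this file): since the
 * lookup takes no ref, a caller that needs the connection beyond the RCU
 * read-side critical section must pin it first, e.g.:
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_connection_rcu(local, skb);
 *	if (conn && !rxrpc_get_connection_maybe(conn))
 *		conn = NULL;
 *	rcu_read_unlock();
 *
 * rxrpc_get_connection_maybe() is assumed here to be the inc-not-zero
 * helper from ar-internal.h; it declines to resurrect a connection whose
 * usage count has already reached zero.
 */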

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates. The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if
		 * necessary through the channel, whilst disposing of the
		 * actual call record.
		 */
		chan->last_service_id = call->service_id;
		if (call->abort_code) {
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
		} else {
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
		}
		/* Sync with rxrpc_conn_retransmit(): make sure the saved
		 * result is visible before last_call marks the channel as
		 * carrying a completed call.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}
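
/* Client calls return early above because their channels belong to the
 * client connection cache; rxrpc_disconnect_client_call() (in conn_client.c)
 * is assumed to release the call's ref on the connection itself.
 */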

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	write_lock(&rxrpc_connection_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxrpc_connection_lock);

	/* Drain the Rx queue. Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU. The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * release a virtual connection - rather than destroying it here, prod the
 * reaper to run immediately so that idle connections are swept up off the
 * put path
 */
void __rxrpc_put_connection(struct rxrpc_connection *conn)
{
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}
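
/* A minimal sketch of how the put side is assumed to reach the function
 * above (modelled on the helper in ar-internal.h; not part of this file):
 *
 *	static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
 *	{
 *		if (atomic_dec_return(&conn->usage) == 1)
 *			__rxrpc_put_connection(conn);
 *	}
 *
 * A count of 1 is the idle value (the ref held by the rxrpc_connections
 * list itself), so reaching it means the connection has gone unused and
 * the reaper should run with no delay.
 */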

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead service connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	reap_older_than = now - rxrpc_connection_expiry * HZ;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		idle_timestamp = READ_ONCE(conn->idle_timestamp);
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long)reap_older_than - (long)idle_timestamp);

		if (time_after(idle_timestamp, reap_older_than)) {
			if (time_before(idle_timestamp, earliest))
				earliest = idle_timestamp;
			continue;
		}
		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 * The cmpxchg fails if something else has taken a ref in the
		 * meantime, in which case the connection is left alone.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long)earliest - now);
		ASSERT(time_after(earliest, now));
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 earliest - now);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}
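
/* A note on the timestamp tests above: jiffies wraps, so the reaper uses
 * the wraparound-safe helpers rather than plain comparisons, e.g.:
 *
 *	time_after(idle_timestamp, reap_older_than)
 *
 * instead of "idle_timestamp > reap_older_than", which would misbehave
 * near the wrap point.
 */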

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections();

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxrpc_connection_proc_list));

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}