/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
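/*
 * A usage note (assuming the usual build of this file into the "rds"
 * module): the 0444 permissions make the parameter read-only at runtime
 * but still visible through sysfs, and it can be set at load time, e.g.:
 *
 *	cat /sys/module/rds/parameters/send_batch_count
 *	modprobe rds send_batch_count=128
 */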

/*
 * Reset the send state.  Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The lock holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the lock.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 *
		 * Transports either define a special xmit_cong_map function,
		 * or we allocate a cong_map message and treat it just like any
		 * other send.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map) {
				unsigned long map_offset = 0;
				unsigned long map_bytes = sizeof(struct rds_header) +
					RDS_CONG_MAP_BYTES;

				while (map_bytes) {
					ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
									   map_offset);
					if (ret <= 0) {
						/* too far down the rabbithole! */
						mutex_unlock(&conn->c_send_lock);
						rds_conn_error(conn, "Cong map xmit failed\n");
						goto out;
					}

					map_offset += ret;
					map_bytes -= ret;
				}
			} else {
				/* send cong update like a normal rm */
				rm = rds_cong_update_alloc(conn);
				if (IS_ERR(rm)) {
					ret = PTR_ERR(rm);
					break;
				}
				rm->data.op_active = 1;

				conn->c_xmit_rm = rm;
			}
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send lock... rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm) {
				was_empty = 1;
				break;
			}

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent &&
			    !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * An rm will only make multiple passes through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the lock then either we'll
	 * see the queued message or the queuer will get the lock.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (send_quota == 0 && !was_empty) {
		/* We exhausted the send quota, but there's work left to
		 * do. Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
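/*
 * A note on the transport contract the loop above relies on (descriptive
 * only; see the individual transports for the real implementations):
 * ->xmit() returns how many header+data bytes it consumed starting at
 * (c_xmit_hdr_off, c_xmit_sg, c_xmit_data_off), with 0 or a negative
 * errno meaning "no forward progress"; ->xmit_rdma() and ->xmit_atomic()
 * either take the whole op or fail with a nonzero return.
 */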

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
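/*
 * A hypothetical transport callback, sketched only to illustrate the
 * is_acked_func hook (the name is an assumption; see the TCP transport
 * for the real thing). Transports that assign m_ack_seq lazily check
 * RDS_MSG_HAS_ACK_SEQ first so an unassigned message is never treated
 * as acked:
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 */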

/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

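	/*
	 * Both lists are kept in ascending sequence order, so looking at
	 * the first entry of each is sufficient; hence the unconditional
	 * break in each loop below.
	 */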
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;

	spin_lock(&rm->m_rs_lock);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock(&rm->m_rs_lock);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
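/*
 * For reference, userspace reaches rds_send_drop_to() via the
 * RDS_CANCEL_SENT_TO socket option (a sketch; the caller fills in a
 * struct sockaddr_in for the destination to cancel):
 *
 *	setsockopt(fd, SOL_RDS, RDS_CANCEL_SENT_TO, &dest, sizeof(dest));
 *
 * A NULL dest, as used on socket teardown, matches every destination.
 */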

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is only a little space left in sndbuf, we don't queue
	 * anything, and userspace gets -EAGAIN. But poll() indicates there's
	 * send room. This can lead to bad behavior (spinning) if snd_bytes
	 * isn't freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
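/*
 * A worked example of the "old value" check above (illustrative numbers):
 * with rds_sk_sndbuf(rs) == 64K and rs_snd_bytes == 60K, a 16K message is
 * still queued because the old count is below the limit, leaving
 * rs_snd_bytes at 76K; the next send then blocks or gets -EAGAIN, and
 * poll() stops reporting send room until acks drain rs_snd_bytes back
 * below the limit.
 */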

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
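/*
 * A worked example (illustrative, assuming 4K pages): a 9000-byte payload
 * needs ceil(9000, PAGE_SIZE) == 3 scatterlist entries, so a plain send
 * is sized at 3 * sizeof(struct scatterlist); an RDS_CMSG_ATOMIC_FADD
 * cmsg would add one more scatterlist entry on top of that.
 */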

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Like Linux UDP, mirror BSD behavior for error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
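/*
 * The userspace view of this entry point, for orientation (a minimal
 * sketch; error handling omitted and the PF_RDS/SOL_RDS constants from
 * the uapi headers assumed to be available):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&faddr, sizeof(faddr));
 *
 * sendto() arrives here through sock_sendmsg() with the destination in
 * msg->msg_name, which is how daddr/dport are picked up above; RDMA and
 * atomic ops ride along as SOL_RDS control messages on sendmsg().
 */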

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}
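/*
 * For context: rds-ping works by sending a 0-byte message to port 0.
 * The receive path recognizes such a ping and calls rds_send_pong()
 * to queue this 0-byte reply, which is why rds_send_xmit() explicitly
 * permits 0-byte sends.
 */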