/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will trigger.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
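/* Usage sketch (assuming the usual rds.ko module build, not stated here):
 * the current batch size can be read at runtime with
 *   cat /sys/module/rds/parameters/send_batch_count
 * and, because the permissions above are 0444, it can only be changed at
 * module load time, e.g.
 *   modprobe rds send_batch_count=256
 */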
static void rds_send_remove_from_sock(struct list_head *messages, int status);
/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_reset);
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
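/* Taken together, acquire_in_xmit()/release_in_xmit() act as a per-connection
 * transmit lock: the atomic test_and_set_bit() on RDS_IN_XMIT admits only one
 * sender at a time, and the barrier above orders the bit clear against the
 * waitqueue check so a sleeper that raced with us is still woken.
 */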
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;
restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * We record the send generation after doing the xmit acquire.
	 * If someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;
	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);
	/*
	 * Spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}
		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;
			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;
			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * message.
			 *
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}
			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}
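			/* Each message either resets the unacked budget and
			 * demands an ACK, or consumes part of the budget;
			 * rds_sysctl_max_unacked_packets/_bytes bound how much
			 * can be in flight before an ACK is requested again.
			 */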
			conn->c_xmit_rm = rm;
		}
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;
		}
		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;
		}
		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;
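			/* ret is the number of bytes the transport consumed.
			 * Charge them first against the header, then against
			 * the data scatterlist, advancing the per-connection
			 * cursors so a partial send resumes exactly where it
			 * left off on the next pass.
			 */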
			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}
			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}
			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}
		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op.  Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}
over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}
	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
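/* When the transport supplies no is_acked callback, a message counts as
 * acked once its header sequence number is at or below the sequence number
 * the peer reported as received.
 */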
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
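/* Both lookups above take a reference on the matched message via
 * atomic_inc(&rm->m_refcount) before returning it, so the caller of
 * rds_send_get_message() ends up owning a reference, presumably dropped
 * with rds_message_put() once the failed op has been handled.
 */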
/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;
	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}
/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
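	/* Worked example: on 4 KB pages a 10 KB payload contributes
	 * ceil(10240, 4096) = 3 scatterlist entries to the allocation,
	 * on top of whatever the RDMA and atomic cmsgs asked for above.
	 */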
	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
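/* cmsg_groups is a two-bit mask: bit 0 is set by RDMA_ARGS and the atomic
 * cmsgs, bit 1 by RDMA_DEST/RDMA_MAP.  A value of 3 means the caller mixed
 * the two mutually exclusive styles in one sendmsg(), which rds_rm_size()
 * rejects with -EINVAL.
 */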
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;
		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	/* Mirror the Linux UDP handling of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
	}

	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}
	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;
	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}
	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}
	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
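	/* Non-blocking senders give up below with -EAGAIN as soon as the
	 * sndbuf is full; blocking senders sleep on sk_sleep() until
	 * rds_send_queue_rm() succeeds or the socket send timeout expires.
	 */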
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(conn);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
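	/* If the transport could not make progress right now (-ENOMEM or
	 * -EAGAIN from rds_send_xmit), the message stays queued and the rds
	 * workqueue retries the transmit instead of failing this sendmsg.
	 */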
	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;
	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);
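	/* A pong is a zero-length data message aimed at the ping's source
	 * port: it is queued directly on the connection's send queue rather
	 * than on any socket, and the send worker pushes it out below.
	 */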
	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}