/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will fire.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_send_lock, flags);
	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		printk(KERN_CRIT "send reset unmapping %p\n", rm);
		rds_message_unmapped(rm);
		spin_unlock_irqrestore(&conn->c_send_lock, flags);

		rds_message_put(rm);
	} else {
		spin_unlock_irqrestore(&conn->c_send_lock, flags);
	}
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *    - tx queueing is a simple fifo list
 *    - reassembly is optional and easily done by transports per conn
 *    - no per flow rx lookup at all, straight to the socket
 *    - less per-frag memory and wire overhead
 *   Con:
 *    - queued acks can be delayed behind large messages
 *   Depends:
 *    - small message latency is higher behind queued large messages
 *    - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int gen = 0;
	LIST_HEAD(to_be_dropped);

	if (!rds_conn_up(conn))
		goto out;
	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn->c_senders);
	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	gen = atomic_inc_return(&conn->c_send_generation);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {

		rm = conn->c_xmit_rm;
		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}
		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock(&conn->c_lock);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to
				 * the retransmit queue.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock(&conn->c_lock);

			if (!rm)
				break;
			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock(&conn->c_lock);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock(&conn->c_lock);
				continue;
			}
			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}
		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}
		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;
			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}
		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock. If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem. If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty. It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	spin_unlock_irqrestore(&conn->c_send_lock, flags);
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	atomic_dec(&conn->c_senders);
	/*
	 * Other senders will see we have c_send_lock and exit. We
	 * need to recheck the send queue and race again for c_send_lock
	 * to make sure messages don't just sit on the send queue, if
	 * somebody hasn't already beat us into the loop.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (!list_empty(&conn->c_send_queue)) {
		rds_stats_inc(s_send_lock_queue_raced);
		if (gen == atomic_read(&conn->c_send_generation)) {
			/* nobody else raced in behind us; kick the send worker again */
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
		}
	}

out:
	return ret;
}
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
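/*
 * Illustrative sketch, not part of this file: an is_acked_func lets a
 * transport supply its own "is this message acked?" test. Assuming a
 * transport that stamps rm->m_ack_seq and sets RDS_MSG_HAS_ACK_SEQ once
 * the message hits the wire (as the TCP transport does), the callback
 * could look roughly like this:
 */
static inline int rds_send_example_is_acked(struct rds_message *rm, u64 ack)
{
	/* not yet assigned a wire sequence, so it can't have been acked */
	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
		return 0;
	return rm->m_ack_seq <= ack;
}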
/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * a RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		rm = list_entry(messages->next, struct rds_message,
				m_sock_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}

		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction. Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
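/*
 * Illustrative sketch, not part of this file: a transport whose wire
 * header carries the peer's cumulative ack would typically call the
 * helper above from its receive path, roughly as
 *
 *	rds_send_drop_acked(conn, be64_to_cpu(hdr->h_ack), NULL);
 *
 * passing NULL so the default sequence-number comparison in
 * rds_send_is_acked() is used, or passing its own is_acked_func (as the
 * TCP transport does) when acks are expressed in transport sequence space.
 */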
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but now we can.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
 * we only want this to fire once so we use the callers 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;
			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's handling of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;
	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}
	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);

	return ret;
}
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}
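/*
 * Illustrative note, not part of this file: rds-ping relies on the 0-byte
 * sends permitted above and probes destination port 0. A sketch of how a
 * receive path might answer such a probe with the helper above (names here
 * are assumptions, not the actual call site):
 *
 *	if (inc->i_hdr.h_dport == 0)
 *		rds_send_pong(conn, inc->i_hdr.h_sport);
 */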