RDS/IB: Remove ib_[header/data]_sge() functions
net/rds/ib_send.c
/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;

		if (op->r_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents,
					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			op->r_mapped = 0;
		}

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 * 1. Notify when we received the ACK on the RDS message
		 *    that was queued with the RDMA. This provides reliable
		 *    notification of RDMA status at the expense of a one-way
		 *    packet delay.
		 * 2. Notify when the IB stack gives us the completion event for
		 *    the RDMA operation.
		 * 3. Notify when the IB stack gives us the completion event for
		 *    the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}

	if (rm->atomic.op_active) {
		struct rm_atomic_op *op = &rm->atomic;

		/* unmap atomic recvbuf */
		if (op->op_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
					DMA_FROM_DEVICE);
			op->op_mapped = 0;
		}

		rds_ib_send_complete(rm, wc_status, rds_atomic_send_complete);

		if (rm->atomic.op_type == RDS_ATOMIC_TYPE_CSWP)
			rds_stats_inc(s_atomic_cswp);
		else
			rds_stats_inc(s_atomic_fadd);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

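		/* s_sge[0] always points at this slot's ring header; s_sge[1]
		 * only needs its lkey set up front - its address and length
		 * are filled in per message by rds_ib_xmit_populate_wr(). */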
		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (!send->s_rm || send->s_wr.opcode == 0xdead)
			continue;
		rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

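		/* wc.wr_id is the ring index of the entry that generated this
		 * completion; every entry from the oldest up to it is done and
		 * can be retired below. */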
		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_ATOMIC_CMP_AND_SWP:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

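			/* Mark the slot as retired; rds_ib_send_clear_ring()
			 * uses the 0xdead opcode to skip entries that have
			 * already completed. */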
			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * never learns that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					rds_ib_send_unmap_rm(ic, send, wc.status);
					rds_ib_send_complete(rm, wc.status, rds_rdma_send_complete);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
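/*
 * Worked example (a sketch, assuming the IB_SET/GET_*_CREDITS macros in
 * ib.h pack send credits into the low 16 bits and posted credits into
 * the high 16 bits of i_credits): with i_credits = 0x0003000a we hold
 * 10 send credits and 3 unadvertised posted credits. Grabbing
 * wanted = 4 succeeds (got = 4), leaves 6 send credits, and - since
 * posted is non-zero - advertises min(3, max_posted) posted credits,
 * clearing them from the high half in the same cmpxchg.
 */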
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline void
rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
		struct rds_ib_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	sge = &send->s_sge[0];
	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

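	/* The header SGE is always present; a data SGE is added only when
	 * this work request carries a payload fragment. */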
	if (length != 0) {
		send->s_wr.num_sge = 2;

		sge = &send->s_sge[1];
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = ic->i_mr->lkey;
	}
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
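/*
 * For example (a sketch, assuming the default 4 KB RDS_FRAG_SIZE), a
 * 10 KB message becomes three work requests: two full fragments and one
 * partial fragment, each with one SGE for a header from the long-lived
 * header ring and one SGE for its slice of the mapped data scatterlist.
 */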
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

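	/* Under flow control we may be granted fewer send credits than ring
	 * slots; shrink the allocation to match and note that we were
	 * throttled so the final fragment below gets signaled. */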
	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.m_rdma_op.r_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.m_sg[rm->data.m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_ib_xmit_populate_wr(ic, send, pos,
					ib_sg_dma_address(dev, scat) + off, len,
					send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

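		/* Zero-length messages jump straight here via the goto above;
		 * normal fragments fall through after their data SGE is set up. */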
add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map one SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

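	/* Map the RDS atomic type onto the IB work request opcode. For
	 * fetch-and-add the addend travels in compare_add and swap is
	 * unused; for compare-and-swap both fields are used as named. */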
	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
	send->s_wr.send_flags = IB_SEND_SIGNALED;
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					    op->r_sg, op->r_nents, (op->r_write) ?
					    DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

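		/* Fill this WR's SGE array from the mapped scatterlist, up to
		 * max_sge entries; remote_addr advances by every byte already
		 * described so the chained WRs cover the region contiguously. */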
		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}


out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}