Line | Data |
---|---|
1 | /* |
2 | * NVMe over Fabrics RDMA target. | |
3 | * Copyright (c) 2015-2016 HGST, a Western Digital Company. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
15 | #include <linux/atomic.h> | |
16 | #include <linux/ctype.h> | |
17 | #include <linux/delay.h> | |
18 | #include <linux/err.h> | |
19 | #include <linux/init.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/nvme.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/wait.h> | |
25 | #include <linux/inet.h> | |
26 | #include <asm/unaligned.h> | |
27 | ||
28 | #include <rdma/ib_verbs.h> | |
29 | #include <rdma/rdma_cm.h> | |
30 | #include <rdma/rw.h> | |
31 | ||
32 | #include <linux/nvme-rdma.h> | |
33 | #include "nvmet.h" | |
34 | ||
35 | /* | |
36 | * We allow up to a page of inline data to go with the SQE | |
37 | */ | |
38 | #define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE | |
39 | ||
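 | /* | |
 | * One nvmet_rdma_cmd backs one RDMA RECV work request: sge[0] receives | |
 | * the NVMe command capsule, and sge[1] (I/O queues only) receives up to | |
 | * NVMET_RDMA_INLINE_DATA_SIZE bytes of inline data. | |
 | */ | |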
40 | struct nvmet_rdma_cmd { | |
41 | struct ib_sge sge[2]; | |
42 | struct ib_cqe cqe; | |
43 | struct ib_recv_wr wr; | |
44 | struct scatterlist inline_sg; | |
45 | struct page *inline_page; | |
46 | struct nvme_command *nvme_cmd; | |
47 | struct nvmet_rdma_queue *queue; | |
48 | }; | |
49 | ||
50 | enum { | |
51 | NVMET_RDMA_REQ_INLINE_DATA = (1 << 0), | |
52 | NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1), | |
53 | }; | |
54 | ||
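 | /* | |
 | * Per-request context: pairs the received command with the RDMA SEND of | |
 | * its completion and carries the rdma_rw context used for any RDMA | |
 | * READ/WRITE of host data. n_rdma counts the work requests that context | |
 | * consumed so they can be credited back to sq_wr_avail on completion. | |
 | */ | |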
55 | struct nvmet_rdma_rsp { | |
56 | struct ib_sge send_sge; | |
57 | struct ib_cqe send_cqe; | |
58 | struct ib_send_wr send_wr; | |
59 | ||
60 | struct nvmet_rdma_cmd *cmd; | |
61 | struct nvmet_rdma_queue *queue; | |
62 | ||
63 | struct ib_cqe read_cqe; | |
64 | struct rdma_rw_ctx rw; | |
65 | ||
66 | struct nvmet_req req; | |
67 | ||
68 | u8 n_rdma; | |
69 | u32 flags; | |
70 | u32 invalidate_rkey; | |
71 | ||
72 | struct list_head wait_list; | |
73 | struct list_head free_list; | |
74 | }; | |
75 | ||
76 | enum nvmet_rdma_queue_state { | |
77 | NVMET_RDMA_Q_CONNECTING, | |
78 | NVMET_RDMA_Q_LIVE, | |
79 | NVMET_RDMA_Q_DISCONNECTING, | |
80 | }; | |
81 | ||
82 | struct nvmet_rdma_queue { | |
83 | struct rdma_cm_id *cm_id; | |
84 | struct nvmet_port *port; | |
85 | struct ib_cq *cq; | |
86 | atomic_t sq_wr_avail; | |
87 | struct nvmet_rdma_device *dev; | |
88 | spinlock_t state_lock; | |
89 | enum nvmet_rdma_queue_state state; | |
90 | struct nvmet_cq nvme_cq; | |
91 | struct nvmet_sq nvme_sq; | |
92 | ||
93 | struct nvmet_rdma_rsp *rsps; | |
94 | struct list_head free_rsps; | |
95 | spinlock_t rsps_lock; | |
96 | struct nvmet_rdma_cmd *cmds; | |
97 | ||
98 | struct work_struct release_work; | |
99 | struct list_head rsp_wait_list; | |
100 | struct list_head rsp_wr_wait_list; | |
101 | spinlock_t rsp_wr_wait_lock; | |
102 | ||
103 | int idx; | |
104 | int host_qid; | |
105 | int recv_queue_size; | |
106 | int send_queue_size; | |
107 | ||
108 | struct list_head queue_list; | |
109 | }; | |
110 | ||
111 | struct nvmet_rdma_device { | |
112 | struct ib_device *device; | |
113 | struct ib_pd *pd; | |
114 | struct ib_srq *srq; | |
115 | struct nvmet_rdma_cmd *srq_cmds; | |
116 | size_t srq_size; | |
117 | struct kref ref; | |
118 | struct list_head entry; | |
119 | }; | |
120 | ||
121 | static bool nvmet_rdma_use_srq; | |
122 | module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444); | |
123 | MODULE_PARM_DESC(use_srq, "Use shared receive queue."); | |
124 | ||
125 | static DEFINE_IDA(nvmet_rdma_queue_ida); | |
126 | static LIST_HEAD(nvmet_rdma_queue_list); | |
127 | static DEFINE_MUTEX(nvmet_rdma_queue_mutex); | |
128 | ||
129 | static LIST_HEAD(device_list); | |
130 | static DEFINE_MUTEX(device_list_mutex); | |
131 | ||
132 | static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp); | |
133 | static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc); | |
134 | static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); | |
135 | static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); | |
136 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); | |
137 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); | |
138 | ||
139 | static struct nvmet_fabrics_ops nvmet_rdma_ops; | |
140 | ||
141 | /* XXX: really should move to a generic header sooner or later.. */ | |
142 | static inline u32 get_unaligned_le24(const u8 *p) | |
143 | { | |
144 | return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16; | |
145 | } | |
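
For clarity, a minimal stand-alone sketch of how this helper decodes the 24-bit little-endian length field of a keyed SGL descriptor; the byte values and the helper name in the sketch are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

/* same bit arithmetic as get_unaligned_le24() above */
static uint32_t le24_to_u32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16;
}

int main(void)
{
	const uint8_t len[3] = { 0x00, 0x10, 0x00 };	/* hypothetical length bytes */

	printf("%u\n", le24_to_u32(len));		/* prints 4096 */
	return 0;
}
```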
146 | ||
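 | /* | |
 | * Host-to-controller data that did not arrive inline must be pulled in | |
 | * with an RDMA READ; controller-to-host data is pushed with an RDMA | |
 | * WRITE, but only when the command has not already failed. | |
 | */ | |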
147 | static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp) | |
148 | { | |
149 | return nvme_is_write(rsp->req.cmd) && | |
150 | rsp->req.data_len && | |
151 | !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); | |
152 | } | |
153 | ||
154 | static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) | |
155 | { | |
156 | return !nvme_is_write(rsp->req.cmd) && | |
157 | rsp->req.data_len && | |
158 | !rsp->req.rsp->status && | |
159 | !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); | |
160 | } | |
161 | ||
162 | static inline struct nvmet_rdma_rsp * | |
163 | nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) | |
164 | { | |
165 | struct nvmet_rdma_rsp *rsp; | |
166 | unsigned long flags; | |
167 | ||
168 | spin_lock_irqsave(&queue->rsps_lock, flags); | |
169 | rsp = list_first_entry(&queue->free_rsps, | |
170 | struct nvmet_rdma_rsp, free_list); | |
171 | list_del(&rsp->free_list); | |
172 | spin_unlock_irqrestore(&queue->rsps_lock, flags); | |
173 | ||
174 | return rsp; | |
175 | } | |
176 | ||
177 | static inline void | |
178 | nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) | |
179 | { | |
180 | unsigned long flags; | |
181 | ||
182 | spin_lock_irqsave(&rsp->queue->rsps_lock, flags); | |
183 | list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); | |
184 | spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); | |
185 | } | |
186 | ||
187 | static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents) | |
188 | { | |
189 | struct scatterlist *sg; | |
190 | int count; | |
191 | ||
192 | if (!sgl || !nents) | |
193 | return; | |
194 | ||
195 | for_each_sg(sgl, sg, nents, count) | |
196 | __free_page(sg_page(sg)); | |
197 | kfree(sgl); | |
198 | } | |
199 | ||
200 | static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, | |
201 | u32 length) | |
202 | { | |
203 | struct scatterlist *sg; | |
204 | struct page *page; | |
205 | unsigned int nent; | |
206 | int i = 0; | |
207 | ||
208 | nent = DIV_ROUND_UP(length, PAGE_SIZE); | |
209 | sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL); | |
210 | if (!sg) | |
211 | goto out; | |
212 | ||
213 | sg_init_table(sg, nent); | |
214 | ||
215 | while (length) { | |
216 | u32 page_len = min_t(u32, length, PAGE_SIZE); | |
217 | ||
218 | page = alloc_page(GFP_KERNEL); | |
219 | if (!page) | |
220 | goto out_free_pages; | |
221 | ||
222 | sg_set_page(&sg[i], page, page_len, 0); | |
223 | length -= page_len; | |
224 | i++; | |
225 | } | |
226 | *sgl = sg; | |
227 | *nents = nent; | |
228 | return 0; | |
229 | ||
230 | out_free_pages: | |
231 | while (i > 0) { | |
232 | i--; | |
233 | __free_page(sg_page(&sg[i])); | |
234 | } | |
235 | kfree(sg); | |
236 | out: | |
237 | return NVME_SC_INTERNAL; | |
238 | } | |
239 | ||
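 | /* | |
 | * Allocate and DMA-map the per-command receive buffers: the command | |
 | * capsule always, plus one inline data page for non-admin queues. | |
 | */ | |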
240 | static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, | |
241 | struct nvmet_rdma_cmd *c, bool admin) | |
242 | { | |
243 | /* NVMe command / RDMA RECV */ | |
244 | c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL); | |
245 | if (!c->nvme_cmd) | |
246 | goto out; | |
247 | ||
248 | c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd, | |
249 | sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); | |
250 | if (ib_dma_mapping_error(ndev->device, c->sge[0].addr)) | |
251 | goto out_free_cmd; | |
252 | ||
253 | c->sge[0].length = sizeof(*c->nvme_cmd); | |
254 | c->sge[0].lkey = ndev->pd->local_dma_lkey; | |
255 | ||
256 | if (!admin) { | |
257 | c->inline_page = alloc_pages(GFP_KERNEL, | |
258 | get_order(NVMET_RDMA_INLINE_DATA_SIZE)); | |
259 | if (!c->inline_page) | |
260 | goto out_unmap_cmd; | |
261 | c->sge[1].addr = ib_dma_map_page(ndev->device, | |
262 | c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE, | |
263 | DMA_FROM_DEVICE); | |
264 | if (ib_dma_mapping_error(ndev->device, c->sge[1].addr)) | |
265 | goto out_free_inline_page; | |
266 | c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE; | |
267 | c->sge[1].lkey = ndev->pd->local_dma_lkey; | |
268 | } | |
269 | ||
270 | c->cqe.done = nvmet_rdma_recv_done; | |
271 | ||
272 | c->wr.wr_cqe = &c->cqe; | |
273 | c->wr.sg_list = c->sge; | |
274 | c->wr.num_sge = admin ? 1 : 2; | |
275 | ||
276 | return 0; | |
277 | ||
278 | out_free_inline_page: | |
279 | if (!admin) { | |
280 | __free_pages(c->inline_page, | |
281 | get_order(NVMET_RDMA_INLINE_DATA_SIZE)); | |
282 | } | |
283 | out_unmap_cmd: | |
284 | ib_dma_unmap_single(ndev->device, c->sge[0].addr, | |
285 | sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); | |
286 | out_free_cmd: | |
287 | kfree(c->nvme_cmd); | |
288 | ||
289 | out: | |
290 | return -ENOMEM; | |
291 | } | |
292 | ||
293 | static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev, | |
294 | struct nvmet_rdma_cmd *c, bool admin) | |
295 | { | |
296 | if (!admin) { | |
297 | ib_dma_unmap_page(ndev->device, c->sge[1].addr, | |
298 | NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE); | |
299 | __free_pages(c->inline_page, | |
300 | get_order(NVMET_RDMA_INLINE_DATA_SIZE)); | |
301 | } | |
302 | ib_dma_unmap_single(ndev->device, c->sge[0].addr, | |
303 | sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); | |
304 | kfree(c->nvme_cmd); | |
305 | } | |
306 | ||
307 | static struct nvmet_rdma_cmd * | |
308 | nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev, | |
309 | int nr_cmds, bool admin) | |
310 | { | |
311 | struct nvmet_rdma_cmd *cmds; | |
312 | int ret = -EINVAL, i; | |
313 | ||
314 | cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); | |
315 | if (!cmds) | |
316 | goto out; | |
317 | ||
318 | for (i = 0; i < nr_cmds; i++) { | |
319 | ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin); | |
320 | if (ret) | |
321 | goto out_free; | |
322 | } | |
323 | ||
324 | return cmds; | |
325 | ||
326 | out_free: | |
327 | while (--i >= 0) | |
328 | nvmet_rdma_free_cmd(ndev, cmds + i, admin); | |
329 | kfree(cmds); | |
330 | out: | |
331 | return ERR_PTR(ret); | |
332 | } | |
333 | ||
334 | static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, | |
335 | struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin) | |
336 | { | |
337 | int i; | |
338 | ||
339 | for (i = 0; i < nr_cmds; i++) | |
340 | nvmet_rdma_free_cmd(ndev, cmds + i, admin); | |
341 | kfree(cmds); | |
342 | } | |
343 | ||
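 | /* | |
 | * Allocate and DMA-map the NVMe completion carried by the response's | |
 | * RDMA SEND, and set up the CQE callbacks for the SEND and for the RDMA | |
 | * READ used on data-in commands. | |
 | */ | |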
344 | static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, | |
345 | struct nvmet_rdma_rsp *r) | |
346 | { | |
347 | /* NVMe CQE / RDMA SEND */ | |
348 | r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL); | |
349 | if (!r->req.rsp) | |
350 | goto out; | |
351 | ||
352 | r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp, | |
353 | sizeof(*r->req.rsp), DMA_TO_DEVICE); | |
354 | if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) | |
355 | goto out_free_rsp; | |
356 | ||
357 | r->send_sge.length = sizeof(*r->req.rsp); | |
358 | r->send_sge.lkey = ndev->pd->local_dma_lkey; | |
359 | ||
360 | r->send_cqe.done = nvmet_rdma_send_done; | |
361 | ||
362 | r->send_wr.wr_cqe = &r->send_cqe; | |
363 | r->send_wr.sg_list = &r->send_sge; | |
364 | r->send_wr.num_sge = 1; | |
365 | r->send_wr.send_flags = IB_SEND_SIGNALED; | |
366 | ||
367 | /* Data In / RDMA READ */ | |
368 | r->read_cqe.done = nvmet_rdma_read_data_done; | |
369 | return 0; | |
370 | ||
371 | out_free_rsp: | |
372 | kfree(r->req.rsp); | |
373 | out: | |
374 | return -ENOMEM; | |
375 | } | |
376 | ||
377 | static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, | |
378 | struct nvmet_rdma_rsp *r) | |
379 | { | |
380 | ib_dma_unmap_single(ndev->device, r->send_sge.addr, | |
381 | sizeof(*r->req.rsp), DMA_TO_DEVICE); | |
382 | kfree(r->req.rsp); | |
383 | } | |
384 | ||
385 | static int | |
386 | nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) | |
387 | { | |
388 | struct nvmet_rdma_device *ndev = queue->dev; | |
389 | int nr_rsps = queue->recv_queue_size * 2; | |
390 | int ret = -EINVAL, i; | |
391 | ||
392 | queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), | |
393 | GFP_KERNEL); | |
394 | if (!queue->rsps) | |
395 | goto out; | |
396 | ||
397 | for (i = 0; i < nr_rsps; i++) { | |
398 | struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; | |
399 | ||
400 | ret = nvmet_rdma_alloc_rsp(ndev, rsp); | |
401 | if (ret) | |
402 | goto out_free; | |
403 | ||
404 | list_add_tail(&rsp->free_list, &queue->free_rsps); | |
405 | } | |
406 | ||
407 | return 0; | |
408 | ||
409 | out_free: | |
410 | while (--i >= 0) { | |
411 | struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; | |
412 | ||
413 | list_del(&rsp->free_list); | |
414 | nvmet_rdma_free_rsp(ndev, rsp); | |
415 | } | |
416 | kfree(queue->rsps); | |
417 | out: | |
418 | return ret; | |
419 | } | |
420 | ||
421 | static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) | |
422 | { | |
423 | struct nvmet_rdma_device *ndev = queue->dev; | |
424 | int i, nr_rsps = queue->recv_queue_size * 2; | |
425 | ||
426 | for (i = 0; i < nr_rsps; i++) { | |
427 | struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; | |
428 | ||
429 | list_del(&rsp->free_list); | |
430 | nvmet_rdma_free_rsp(ndev, rsp); | |
431 | } | |
432 | kfree(queue->rsps); | |
433 | } | |
434 | ||
435 | static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, | |
436 | struct nvmet_rdma_cmd *cmd) | |
437 | { | |
438 | struct ib_recv_wr *bad_wr; | |
439 | ||
440 | if (ndev->srq) | |
441 | return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); | |
442 | return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); | |
443 | } | |
444 | ||
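 | /* | |
 | * Commands that could not start because the send queue ran out of | |
 | * work-request credits sit on rsp_wr_wait_list; retry them in order as | |
 | * credits are returned, stopping at the first one that still won't fit. | |
 | */ | |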
445 | static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) | |
446 | { | |
447 | spin_lock(&queue->rsp_wr_wait_lock); | |
448 | while (!list_empty(&queue->rsp_wr_wait_list)) { | |
449 | struct nvmet_rdma_rsp *rsp; | |
450 | bool ret; | |
451 | ||
452 | rsp = list_entry(queue->rsp_wr_wait_list.next, | |
453 | struct nvmet_rdma_rsp, wait_list); | |
454 | list_del(&rsp->wait_list); | |
455 | ||
456 | spin_unlock(&queue->rsp_wr_wait_lock); | |
457 | ret = nvmet_rdma_execute_command(rsp); | |
458 | spin_lock(&queue->rsp_wr_wait_lock); | |
459 | ||
460 | if (!ret) { | |
461 | list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); | |
462 | break; | |
463 | } | |
464 | } | |
465 | spin_unlock(&queue->rsp_wr_wait_lock); | |
466 | } | |
467 | ||
468 | ||
469 | static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) | |
470 | { | |
471 | struct nvmet_rdma_queue *queue = rsp->queue; | |
472 | ||
473 | atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); | |
474 | ||
475 | if (rsp->n_rdma) { | |
476 | rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, | |
477 | queue->cm_id->port_num, rsp->req.sg, | |
478 | rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); | |
479 | } | |
480 | ||
481 | if (rsp->req.sg != &rsp->cmd->inline_sg) | |
482 | nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt); | |
483 | ||
484 | if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) | |
485 | nvmet_rdma_process_wr_wait_list(queue); | |
486 | ||
487 | nvmet_rdma_put_rsp(rsp); | |
488 | } | |
489 | ||
490 | static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) | |
491 | { | |
492 | if (queue->nvme_sq.ctrl) { | |
493 | nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); | |
494 | } else { | |
495 | /* | |
496 | * We haven't set up the controller yet in case of an | |
497 | * admin connect error, so just disconnect and clean up | |
498 | * the queue. | |
499 | */ | |
500 | nvmet_rdma_queue_disconnect(queue); | |
501 | } | |
502 | } | |
503 | ||
504 | static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) | |
505 | { | |
506 | struct nvmet_rdma_rsp *rsp = | |
507 | container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); | |
508 | ||
509 | nvmet_rdma_release_rsp(rsp); | |
510 | ||
511 | if (unlikely(wc->status != IB_WC_SUCCESS && | |
512 | wc->status != IB_WC_WR_FLUSH_ERR)) { | |
513 | pr_err("SEND for CQE 0x%p failed with status %s (%d).\n", | |
514 | wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); | |
515 | nvmet_rdma_error_comp(rsp->queue); | |
516 | } | |
517 | } | |
518 | ||
519 | static void nvmet_rdma_queue_response(struct nvmet_req *req) | |
520 | { | |
521 | struct nvmet_rdma_rsp *rsp = | |
522 | container_of(req, struct nvmet_rdma_rsp, req); | |
523 | struct rdma_cm_id *cm_id = rsp->queue->cm_id; | |
524 | struct ib_send_wr *first_wr, *bad_wr; | |
525 | ||
526 | if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { | |
527 | rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; | |
528 | rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; | |
529 | } else { | |
530 | rsp->send_wr.opcode = IB_WR_SEND; | |
531 | } | |
532 | ||
533 | if (nvmet_rdma_need_data_out(rsp)) | |
534 | first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, | |
535 | cm_id->port_num, NULL, &rsp->send_wr); | |
536 | else | |
537 | first_wr = &rsp->send_wr; | |
538 | ||
539 | nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); | |
540 | if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { | |
541 | pr_err("sending cmd response failed\n"); | |
542 | nvmet_rdma_release_rsp(rsp); | |
543 | } | |
544 | } | |
545 | ||
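 | /* | |
 | * Completion of the RDMA READ that fetched host data for a write | |
 | * command: return the work-request credits, tear down the rw context, | |
 | * and hand the request to the core for execution unless the READ failed. | |
 | */ | |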
546 | static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) | |
547 | { | |
548 | struct nvmet_rdma_rsp *rsp = | |
549 | container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); | |
550 | struct nvmet_rdma_queue *queue = cq->cq_context; | |
551 | ||
552 | WARN_ON(rsp->n_rdma <= 0); | |
553 | atomic_add(rsp->n_rdma, &queue->sq_wr_avail); | |
554 | rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, | |
555 | queue->cm_id->port_num, rsp->req.sg, | |
556 | rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); | |
557 | rsp->n_rdma = 0; | |
558 | ||
559 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | |
560 | nvmet_rdma_release_rsp(rsp); | |
561 | if (wc->status != IB_WC_WR_FLUSH_ERR) { | |
562 | pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", | |
563 | wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); | |
564 | nvmet_rdma_error_comp(queue); | |
565 | } | |
566 | return; | |
567 | } | |
568 | ||
569 | rsp->req.execute(&rsp->req); | |
570 | } | |
571 | ||
572 | static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, | |
573 | u64 off) | |
574 | { | |
575 | sg_init_table(&rsp->cmd->inline_sg, 1); | |
576 | sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off); | |
577 | rsp->req.sg = &rsp->cmd->inline_sg; | |
578 | rsp->req.sg_cnt = 1; | |
579 | } | |
580 | ||
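 | /* | |
 | * Inline SGL: the data was already received into the per-command inline | |
 | * page, so just point req.sg at it after bounds-checking offset + length | |
 | * against NVMET_RDMA_INLINE_DATA_SIZE. | |
 | */ | |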
581 | static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) | |
582 | { | |
583 | struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; | |
584 | u64 off = le64_to_cpu(sgl->addr); | |
585 | u32 len = le32_to_cpu(sgl->length); | |
586 | ||
587 | if (!nvme_is_write(rsp->req.cmd)) | |
588 | return NVME_SC_INVALID_FIELD | NVME_SC_DNR; | |
589 | ||
590 | if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) { | |
591 | pr_err("invalid inline data offset!\n"); | |
592 | return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; | |
593 | } | |
594 | ||
595 | /* no data command? */ | |
596 | if (!len) | |
597 | return 0; | |
598 | ||
599 | nvmet_rdma_use_inline_sg(rsp, len, off); | |
600 | rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; | |
601 | return 0; | |
602 | } | |
603 | ||
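 | /* | |
 | * Keyed SGL: reuse the pre-allocated inline page when the transfer fits | |
 | * and this is an I/O queue, otherwise allocate a fresh SG list, then | |
 | * build an rdma_rw context against the host's remote address and rkey, | |
 | * optionally arming SEND-with-invalidate for that rkey. | |
 | */ | |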
604 | static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, | |
605 | struct nvme_keyed_sgl_desc *sgl, bool invalidate) | |
606 | { | |
607 | struct rdma_cm_id *cm_id = rsp->queue->cm_id; | |
608 | u64 addr = le64_to_cpu(sgl->addr); | |
609 | u32 len = get_unaligned_le24(sgl->length); | |
610 | u32 key = get_unaligned_le32(sgl->key); | |
611 | int ret; | |
612 | u16 status; | |
613 | ||
614 | /* no data command? */ | |
615 | if (!len) | |
616 | return 0; | |
617 | ||
618 | /* use the already allocated data buffer if possible */ | |
619 | if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) { | |
620 | nvmet_rdma_use_inline_sg(rsp, len, 0); | |
621 | } else { | |
622 | status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, | |
623 | len); | |
624 | if (status) | |
625 | return status; | |
626 | } | |
627 | ||
628 | ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, | |
629 | rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, | |
630 | nvmet_data_dir(&rsp->req)); | |
631 | if (ret < 0) | |
632 | return NVME_SC_INTERNAL; | |
633 | rsp->n_rdma += ret; | |
634 | ||
635 | if (invalidate) { | |
636 | rsp->invalidate_rkey = key; | |
637 | rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; | |
638 | } | |
639 | ||
640 | return 0; | |
641 | } | |
642 | ||
643 | static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) | |
644 | { | |
645 | struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; | |
646 | ||
647 | switch (sgl->type >> 4) { | |
648 | case NVME_SGL_FMT_DATA_DESC: | |
649 | switch (sgl->type & 0xf) { | |
650 | case NVME_SGL_FMT_OFFSET: | |
651 | return nvmet_rdma_map_sgl_inline(rsp); | |
652 | default: | |
653 | pr_err("invalid SGL subtype: %#x\n", sgl->type); | |
654 | return NVME_SC_INVALID_FIELD | NVME_SC_DNR; | |
655 | } | |
656 | case NVME_KEY_SGL_FMT_DATA_DESC: | |
657 | switch (sgl->type & 0xf) { | |
658 | case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE: | |
659 | return nvmet_rdma_map_sgl_keyed(rsp, sgl, true); | |
660 | case NVME_SGL_FMT_ADDRESS: | |
661 | return nvmet_rdma_map_sgl_keyed(rsp, sgl, false); | |
662 | default: | |
663 | pr_err("invalid SGL subtype: %#x\n", sgl->type); | |
664 | return NVME_SC_INVALID_FIELD | NVME_SC_DNR; | |
665 | } | |
666 | default: | |
667 | pr_err("invalid SGL type: %#x\n", sgl->type); | |
668 | return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; | |
669 | } | |
670 | } | |
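
The dispatch above splits the one-byte SGL descriptor type into a high nibble (descriptor format) and a low nibble (subtype). A stand-alone illustration with a hypothetical descriptor byte:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t type = 0x4f;	/* hypothetical SGL descriptor type byte */

	/* high nibble selects the descriptor format, low nibble the subtype,
	 * mirroring the nested switch in nvmet_rdma_map_sgl() above */
	printf("format=%#x subtype=%#x\n", type >> 4, type & 0xf);
	return 0;
}
```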
671 | ||
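 | /* | |
 | * Claim 1 + n_rdma send-queue slots (the completion SEND plus any RDMA | |
 | * READ/WRITE work requests) before starting the command; if the queue is | |
 | * full, give the credits back and let the caller park the command on the | |
 | * wait list. | |
 | */ | |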
672 | static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) | |
673 | { | |
674 | struct nvmet_rdma_queue *queue = rsp->queue; | |
675 | ||
676 | if (unlikely(atomic_sub_return(1 + rsp->n_rdma, | |
677 | &queue->sq_wr_avail) < 0)) { | |
678 | pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", | |
679 | 1 + rsp->n_rdma, queue->idx, | |
680 | queue->nvme_sq.ctrl->cntlid); | |
681 | atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); | |
682 | return false; | |
683 | } | |
684 | ||
685 | if (nvmet_rdma_need_data_in(rsp)) { | |
686 | if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, | |
687 | queue->cm_id->port_num, &rsp->read_cqe, NULL)) | |
688 | nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); | |
689 | } else { | |
690 | rsp->req.execute(&rsp->req); | |
691 | } | |
692 | ||
693 | return true; | |
694 | } | |
695 | ||
696 | static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, | |
697 | struct nvmet_rdma_rsp *cmd) | |
698 | { | |
699 | u16 status; | |
700 | ||
701 | cmd->queue = queue; | |
702 | cmd->n_rdma = 0; | |
703 | cmd->req.port = queue->port; | |
704 | ||
705 | if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, | |
706 | &queue->nvme_sq, &nvmet_rdma_ops)) | |
707 | return; | |
708 | ||
709 | status = nvmet_rdma_map_sgl(cmd); | |
710 | if (status) | |
711 | goto out_err; | |
712 | ||
713 | if (unlikely(!nvmet_rdma_execute_command(cmd))) { | |
714 | spin_lock(&queue->rsp_wr_wait_lock); | |
715 | list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); | |
716 | spin_unlock(&queue->rsp_wr_wait_lock); | |
717 | } | |
718 | ||
719 | return; | |
720 | ||
721 | out_err: | |
722 | nvmet_req_complete(&cmd->req, status); | |
723 | } | |
724 | ||
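 | /* | |
 | * RECV completion: a new command capsule arrived. Commands that show up | |
 | * while the queue is still connecting are parked on rsp_wait_list and | |
 | * replayed once the CM ESTABLISHED event flips the queue to LIVE. | |
 | */ | |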
725 | static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) | |
726 | { | |
727 | struct nvmet_rdma_cmd *cmd = | |
728 | container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); | |
729 | struct nvmet_rdma_queue *queue = cq->cq_context; | |
730 | struct nvmet_rdma_rsp *rsp; | |
731 | ||
732 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | |
733 | if (wc->status != IB_WC_WR_FLUSH_ERR) { | |
734 | pr_err("RECV for CQE 0x%p failed with status %s (%d)\n", | |
735 | wc->wr_cqe, ib_wc_status_msg(wc->status), | |
736 | wc->status); | |
737 | nvmet_rdma_error_comp(queue); | |
738 | } | |
739 | return; | |
740 | } | |
741 | ||
742 | if (unlikely(wc->byte_len < sizeof(struct nvme_command))) { | |
743 | pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n"); | |
744 | nvmet_rdma_error_comp(queue); | |
745 | return; | |
746 | } | |
747 | ||
748 | cmd->queue = queue; | |
749 | rsp = nvmet_rdma_get_rsp(queue); | |
750 | rsp->cmd = cmd; | |
751 | rsp->flags = 0; | |
752 | rsp->req.cmd = cmd->nvme_cmd; | |
753 | ||
754 | if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { | |
755 | unsigned long flags; | |
756 | ||
757 | spin_lock_irqsave(&queue->state_lock, flags); | |
758 | if (queue->state == NVMET_RDMA_Q_CONNECTING) | |
759 | list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); | |
760 | else | |
761 | nvmet_rdma_put_rsp(rsp); | |
762 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
763 | return; | |
764 | } | |
765 | ||
766 | nvmet_rdma_handle_command(queue, rsp); | |
767 | } | |
768 | ||
769 | static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev) | |
770 | { | |
771 | if (!ndev->srq) | |
772 | return; | |
773 | ||
774 | nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false); | |
775 | ib_destroy_srq(ndev->srq); | |
776 | } | |
777 | ||
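 | /* | |
 | * Optional shared receive queue: one set of receive buffers shared by | |
 | * every queue on the device. If the HCA cannot create an SRQ, the code | |
 | * silently falls back to per-queue receive queues. | |
 | */ | |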
778 | static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev) | |
779 | { | |
780 | struct ib_srq_init_attr srq_attr = { NULL, }; | |
781 | struct ib_srq *srq; | |
782 | size_t srq_size; | |
783 | int ret, i; | |
784 | ||
785 | srq_size = 4095; /* XXX: tune */ | |
786 | ||
787 | srq_attr.attr.max_wr = srq_size; | |
788 | srq_attr.attr.max_sge = 2; | |
789 | srq_attr.attr.srq_limit = 0; | |
790 | srq_attr.srq_type = IB_SRQT_BASIC; | |
791 | srq = ib_create_srq(ndev->pd, &srq_attr); | |
792 | if (IS_ERR(srq)) { | |
793 | /* | |
794 | * If SRQs aren't supported we just go ahead and use normal | |
795 | * non-shared receive queues. | |
796 | */ | |
797 | pr_info("SRQ requested but not supported.\n"); | |
798 | return 0; | |
799 | } | |
800 | ||
801 | ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false); | |
802 | if (IS_ERR(ndev->srq_cmds)) { | |
803 | ret = PTR_ERR(ndev->srq_cmds); | |
804 | goto out_destroy_srq; | |
805 | } | |
806 | ||
807 | ndev->srq = srq; | |
808 | ndev->srq_size = srq_size; | |
809 | ||
810 | for (i = 0; i < srq_size; i++) | |
811 | nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]); | |
812 | ||
813 | return 0; | |
814 | ||
815 | out_destroy_srq: | |
816 | ib_destroy_srq(srq); | |
817 | return ret; | |
818 | } | |
819 | ||
820 | static void nvmet_rdma_free_dev(struct kref *ref) | |
821 | { | |
822 | struct nvmet_rdma_device *ndev = | |
823 | container_of(ref, struct nvmet_rdma_device, ref); | |
824 | ||
825 | mutex_lock(&device_list_mutex); | |
826 | list_del(&ndev->entry); | |
827 | mutex_unlock(&device_list_mutex); | |
828 | ||
829 | nvmet_rdma_destroy_srq(ndev); | |
830 | ib_dealloc_pd(ndev->pd); | |
831 | ||
832 | kfree(ndev); | |
833 | } | |
834 | ||
835 | static struct nvmet_rdma_device * | |
836 | nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) | |
837 | { | |
838 | struct nvmet_rdma_device *ndev; | |
839 | int ret; | |
840 | ||
841 | mutex_lock(&device_list_mutex); | |
842 | list_for_each_entry(ndev, &device_list, entry) { | |
843 | if (ndev->device->node_guid == cm_id->device->node_guid && | |
844 | kref_get_unless_zero(&ndev->ref)) | |
845 | goto out_unlock; | |
846 | } | |
847 | ||
848 | ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); | |
849 | if (!ndev) | |
850 | goto out_err; | |
851 | ||
852 | ndev->device = cm_id->device; | |
853 | kref_init(&ndev->ref); | |
854 | ||
855 | ndev->pd = ib_alloc_pd(ndev->device); | |
856 | if (IS_ERR(ndev->pd)) | |
857 | goto out_free_dev; | |
858 | ||
859 | if (nvmet_rdma_use_srq) { | |
860 | ret = nvmet_rdma_init_srq(ndev); | |
861 | if (ret) | |
862 | goto out_free_pd; | |
863 | } | |
864 | ||
865 | list_add(&ndev->entry, &device_list); | |
866 | out_unlock: | |
867 | mutex_unlock(&device_list_mutex); | |
868 | pr_debug("added %s.\n", ndev->device->name); | |
869 | return ndev; | |
870 | ||
871 | out_free_pd: | |
872 | ib_dealloc_pd(ndev->pd); | |
873 | out_free_dev: | |
874 | kfree(ndev); | |
875 | out_err: | |
876 | mutex_unlock(&device_list_mutex); | |
877 | return NULL; | |
878 | } | |
879 | ||
880 | static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) | |
881 | { | |
882 | struct ib_qp_init_attr qp_attr; | |
883 | struct nvmet_rdma_device *ndev = queue->dev; | |
884 | int comp_vector, nr_cqe, ret, i; | |
885 | ||
886 | /* | |
887 | * Spread the io queues across completion vectors, | |
888 | * but still keep all admin queues on vector 0. | |
889 | */ | |
890 | comp_vector = !queue->host_qid ? 0 : | |
891 | queue->idx % ndev->device->num_comp_vectors; | |
892 | ||
893 | /* | |
894 | * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND. | |
895 | */ | |
896 | nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; | |
897 | ||
898 | queue->cq = ib_alloc_cq(ndev->device, queue, | |
899 | nr_cqe + 1, comp_vector, | |
900 | IB_POLL_WORKQUEUE); | |
901 | if (IS_ERR(queue->cq)) { | |
902 | ret = PTR_ERR(queue->cq); | |
903 | pr_err("failed to create CQ cqe= %d ret= %d\n", | |
904 | nr_cqe + 1, ret); | |
905 | goto out; | |
906 | } | |
907 | ||
908 | memset(&qp_attr, 0, sizeof(qp_attr)); | |
909 | qp_attr.qp_context = queue; | |
910 | qp_attr.event_handler = nvmet_rdma_qp_event; | |
911 | qp_attr.send_cq = queue->cq; | |
912 | qp_attr.recv_cq = queue->cq; | |
913 | qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; | |
914 | qp_attr.qp_type = IB_QPT_RC; | |
915 | /* +1 for drain */ | |
916 | qp_attr.cap.max_send_wr = queue->send_queue_size + 1; | |
917 | qp_attr.cap.max_rdma_ctxs = queue->send_queue_size; | |
918 | qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, | |
919 | ndev->device->attrs.max_sge); | |
920 | ||
921 | if (ndev->srq) { | |
922 | qp_attr.srq = ndev->srq; | |
923 | } else { | |
924 | /* +1 for drain */ | |
925 | qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; | |
926 | qp_attr.cap.max_recv_sge = 2; | |
927 | } | |
928 | ||
929 | ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); | |
930 | if (ret) { | |
931 | pr_err("failed to create_qp ret= %d\n", ret); | |
932 | goto err_destroy_cq; | |
933 | } | |
934 | ||
935 | atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); | |
936 | ||
937 | pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", | |
938 | __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, | |
939 | qp_attr.cap.max_send_wr, queue->cm_id); | |
940 | ||
941 | if (!ndev->srq) { | |
942 | for (i = 0; i < queue->recv_queue_size; i++) { | |
943 | queue->cmds[i].queue = queue; | |
944 | nvmet_rdma_post_recv(ndev, &queue->cmds[i]); | |
945 | } | |
946 | } | |
947 | ||
948 | out: | |
949 | return ret; | |
950 | ||
951 | err_destroy_cq: | |
952 | ib_free_cq(queue->cq); | |
953 | goto out; | |
954 | } | |
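
To make the CQ and send-queue sizing above concrete, a small stand-alone calculation with hypothetical queue depths; the extra +1 values mirror the entries the code reserves on top of the negotiated sizes (e.g. for drain):

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical queue depths taken from the CM connect request */
	int recv_queue_size = 128;	/* hsqsize */
	int send_queue_size = 128;	/* hrqsize */

	/* one CQE per RECV plus two per command (RDMA READ/WRITE and SEND) */
	int nr_cqe = recv_queue_size + 2 * send_queue_size;

	printf("CQ entries requested: %d\n", nr_cqe + 1);		/* 385 */
	printf("max_send_wr (+1 for drain): %d\n", send_queue_size + 1);/* 129 */
	return 0;
}
```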
955 | ||
956 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) | |
957 | { | |
958 | rdma_destroy_qp(queue->cm_id); | |
959 | ib_free_cq(queue->cq); | |
960 | } | |
961 | ||
962 | static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) | |
963 | { | |
964 | pr_info("freeing queue %d\n", queue->idx); | |
965 | ||
966 | nvmet_sq_destroy(&queue->nvme_sq); | |
967 | ||
968 | nvmet_rdma_destroy_queue_ib(queue); | |
969 | if (!queue->dev->srq) { | |
970 | nvmet_rdma_free_cmds(queue->dev, queue->cmds, | |
971 | queue->recv_queue_size, | |
972 | !queue->host_qid); | |
973 | } | |
974 | nvmet_rdma_free_rsps(queue); | |
975 | ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); | |
976 | kfree(queue); | |
977 | } | |
978 | ||
979 | static void nvmet_rdma_release_queue_work(struct work_struct *w) | |
980 | { | |
981 | struct nvmet_rdma_queue *queue = | |
982 | container_of(w, struct nvmet_rdma_queue, release_work); | |
983 | struct rdma_cm_id *cm_id = queue->cm_id; | |
984 | struct nvmet_rdma_device *dev = queue->dev; | |
985 | ||
986 | nvmet_rdma_free_queue(queue); | |
987 | rdma_destroy_id(cm_id); | |
988 | kref_put(&dev->ref, nvmet_rdma_free_dev); | |
989 | } | |
990 | ||
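 | /* | |
 | * The RDMA CM private data carries the NVMe/RDMA connect parameters: | |
 | * qid selects the queue, hsqsize sizes our receive queue and hrqsize our | |
 | * send queue, and the admin queue depth is capped at NVMF_AQ_DEPTH. | |
 | */ | |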
991 | static int | |
992 | nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, | |
993 | struct nvmet_rdma_queue *queue) | |
994 | { | |
995 | struct nvme_rdma_cm_req *req; | |
996 | ||
997 | req = (struct nvme_rdma_cm_req *)conn->private_data; | |
998 | if (!req || conn->private_data_len == 0) | |
999 | return NVME_RDMA_CM_INVALID_LEN; | |
1000 | ||
1001 | if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0) | |
1002 | return NVME_RDMA_CM_INVALID_RECFMT; | |
1003 | ||
1004 | queue->host_qid = le16_to_cpu(req->qid); | |
1005 | ||
1006 | /* | |
1007 | * req->hsqsize corresponds to our recv queue size | |
1008 | * req->hrqsize corresponds to our send queue size | |
1009 | */ | |
1010 | queue->recv_queue_size = le16_to_cpu(req->hsqsize); | |
1011 | queue->send_queue_size = le16_to_cpu(req->hrqsize); | |
1012 | ||
1013 | if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH) | |
1014 | return NVME_RDMA_CM_INVALID_HSQSIZE; | |
1015 | ||
1016 | /* XXX: Should we enforce some kind of max for IO queues? */ | |
1017 | ||
1018 | return 0; | |
1019 | } | |
1020 | ||
1021 | static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id, | |
1022 | enum nvme_rdma_cm_status status) | |
1023 | { | |
1024 | struct nvme_rdma_cm_rej rej; | |
1025 | ||
1026 | rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); | |
1027 | rej.sts = cpu_to_le16(status); | |
1028 | ||
1029 | return rdma_reject(cm_id, (void *)&rej, sizeof(rej)); | |
1030 | } | |
1031 | ||
1032 | static struct nvmet_rdma_queue * | |
1033 | nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, | |
1034 | struct rdma_cm_id *cm_id, | |
1035 | struct rdma_cm_event *event) | |
1036 | { | |
1037 | struct nvmet_rdma_queue *queue; | |
1038 | int ret; | |
1039 | ||
1040 | queue = kzalloc(sizeof(*queue), GFP_KERNEL); | |
1041 | if (!queue) { | |
1042 | ret = NVME_RDMA_CM_NO_RSC; | |
1043 | goto out_reject; | |
1044 | } | |
1045 | ||
1046 | ret = nvmet_sq_init(&queue->nvme_sq); | |
1047 | if (ret) | |
1048 | goto out_free_queue; | |
1049 | ||
1050 | ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); | |
1051 | if (ret) | |
1052 | goto out_destroy_sq; | |
1053 | ||
1054 | /* | |
1055 | * Schedules the actual release because calling rdma_destroy_id from | |
1056 | * inside a CM callback would trigger a deadlock. (great API design..) | |
1057 | */ | |
1058 | INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); | |
1059 | queue->dev = ndev; | |
1060 | queue->cm_id = cm_id; | |
1061 | ||
1062 | spin_lock_init(&queue->state_lock); | |
1063 | queue->state = NVMET_RDMA_Q_CONNECTING; | |
1064 | INIT_LIST_HEAD(&queue->rsp_wait_list); | |
1065 | INIT_LIST_HEAD(&queue->rsp_wr_wait_list); | |
1066 | spin_lock_init(&queue->rsp_wr_wait_lock); | |
1067 | INIT_LIST_HEAD(&queue->free_rsps); | |
1068 | spin_lock_init(&queue->rsps_lock); | |
1069 | ||
1070 | queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); | |
1071 | if (queue->idx < 0) { | |
1072 | ret = NVME_RDMA_CM_NO_RSC; | |
1073 | goto out_free_queue; | |
1074 | } | |
1075 | ||
1076 | ret = nvmet_rdma_alloc_rsps(queue); | |
1077 | if (ret) { | |
1078 | ret = NVME_RDMA_CM_NO_RSC; | |
1079 | goto out_ida_remove; | |
1080 | } | |
1081 | ||
1082 | if (!ndev->srq) { | |
1083 | queue->cmds = nvmet_rdma_alloc_cmds(ndev, | |
1084 | queue->recv_queue_size, | |
1085 | !queue->host_qid); | |
1086 | if (IS_ERR(queue->cmds)) { | |
1087 | ret = NVME_RDMA_CM_NO_RSC; | |
1088 | goto out_free_responses; | |
1089 | } | |
1090 | } | |
1091 | ||
1092 | ret = nvmet_rdma_create_queue_ib(queue); | |
1093 | if (ret) { | |
1094 | pr_err("%s: creating RDMA queue failed (%d).\n", | |
1095 | __func__, ret); | |
1096 | ret = NVME_RDMA_CM_NO_RSC; | |
1097 | goto out_free_cmds; | |
1098 | } | |
1099 | ||
1100 | return queue; | |
1101 | ||
1102 | out_free_cmds: | |
1103 | if (!ndev->srq) { | |
1104 | nvmet_rdma_free_cmds(queue->dev, queue->cmds, | |
1105 | queue->recv_queue_size, | |
1106 | !queue->host_qid); | |
1107 | } | |
1108 | out_free_responses: | |
1109 | nvmet_rdma_free_rsps(queue); | |
1110 | out_ida_remove: | |
1111 | ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); | |
1112 | out_destroy_sq: | |
1113 | nvmet_sq_destroy(&queue->nvme_sq); | |
1114 | out_free_queue: | |
1115 | kfree(queue); | |
1116 | out_reject: | |
1117 | nvmet_rdma_cm_reject(cm_id, ret); | |
1118 | return NULL; | |
1119 | } | |
1120 | ||
1121 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) | |
1122 | { | |
1123 | struct nvmet_rdma_queue *queue = priv; | |
1124 | ||
1125 | switch (event->event) { | |
1126 | case IB_EVENT_COMM_EST: | |
1127 | rdma_notify(queue->cm_id, event->event); | |
1128 | break; | |
1129 | default: | |
1130 | pr_err("received unrecognized IB QP event %d\n", event->event); | |
1131 | break; | |
1132 | } | |
1133 | } | |
1134 | ||
1135 | static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, | |
1136 | struct nvmet_rdma_queue *queue, | |
1137 | struct rdma_conn_param *p) | |
1138 | { | |
1139 | struct rdma_conn_param param = { }; | |
1140 | struct nvme_rdma_cm_rep priv = { }; | |
1141 | int ret = -ENOMEM; | |
1142 | ||
1143 | param.rnr_retry_count = 7; | |
1144 | param.flow_control = 1; | |
1145 | param.initiator_depth = min_t(u8, p->initiator_depth, | |
1146 | queue->dev->device->attrs.max_qp_init_rd_atom); | |
1147 | param.private_data = &priv; | |
1148 | param.private_data_len = sizeof(priv); | |
1149 | priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); | |
1150 | priv.crqsize = cpu_to_le16(queue->recv_queue_size); | |
1151 | ||
1152 | ret = rdma_accept(cm_id, ¶m); | |
1153 | if (ret) | |
1154 | pr_err("rdma_accept failed (error code = %d)\n", ret); | |
1155 | ||
1156 | return ret; | |
1157 | } | |
1158 | ||
1159 | static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, | |
1160 | struct rdma_cm_event *event) | |
1161 | { | |
1162 | struct nvmet_rdma_device *ndev; | |
1163 | struct nvmet_rdma_queue *queue; | |
1164 | int ret = -EINVAL; | |
1165 | ||
1166 | ndev = nvmet_rdma_find_get_device(cm_id); | |
1167 | if (!ndev) { | |
1168 | pr_err("no client data!\n"); | |
1169 | nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); | |
1170 | return -ECONNREFUSED; | |
1171 | } | |
1172 | ||
1173 | queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); | |
1174 | if (!queue) { | |
1175 | ret = -ENOMEM; | |
1176 | goto put_device; | |
1177 | } | |
1178 | queue->port = cm_id->context; | |
1179 | ||
1180 | ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); | |
1181 | if (ret) | |
1182 | goto release_queue; | |
1183 | ||
1184 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1185 | list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); | |
1186 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1187 | ||
1188 | return 0; | |
1189 | ||
1190 | release_queue: | |
1191 | nvmet_rdma_free_queue(queue); | |
1192 | put_device: | |
1193 | kref_put(&ndev->ref, nvmet_rdma_free_dev); | |
1194 | ||
1195 | return ret; | |
1196 | } | |
1197 | ||
1198 | static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) | |
1199 | { | |
1200 | unsigned long flags; | |
1201 | ||
1202 | spin_lock_irqsave(&queue->state_lock, flags); | |
1203 | if (queue->state != NVMET_RDMA_Q_CONNECTING) { | |
1204 | pr_warn("trying to establish a connected queue\n"); | |
1205 | goto out_unlock; | |
1206 | } | |
1207 | queue->state = NVMET_RDMA_Q_LIVE; | |
1208 | ||
1209 | while (!list_empty(&queue->rsp_wait_list)) { | |
1210 | struct nvmet_rdma_rsp *cmd; | |
1211 | ||
1212 | cmd = list_first_entry(&queue->rsp_wait_list, | |
1213 | struct nvmet_rdma_rsp, wait_list); | |
1214 | list_del(&cmd->wait_list); | |
1215 | ||
1216 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1217 | nvmet_rdma_handle_command(queue, cmd); | |
1218 | spin_lock_irqsave(&queue->state_lock, flags); | |
1219 | } | |
1220 | ||
1221 | out_unlock: | |
1222 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1223 | } | |
1224 | ||
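 | /* | |
 | * Tear-down path: flip the queue to DISCONNECTING exactly once, then | |
 | * disconnect the CM ID, drain the QP so all outstanding completions | |
 | * flush, and defer the final release to the workqueue. | |
 | */ | |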
1225 | static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |
1226 | { | |
1227 | bool disconnect = false; | |
1228 | unsigned long flags; | |
1229 | ||
1230 | pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); | |
1231 | ||
1232 | spin_lock_irqsave(&queue->state_lock, flags); | |
1233 | switch (queue->state) { | |
1234 | case NVMET_RDMA_Q_CONNECTING: | |
1235 | case NVMET_RDMA_Q_LIVE: | |
1236 | disconnect = true; | |
1237 | queue->state = NVMET_RDMA_Q_DISCONNECTING; | |
1238 | break; | |
1239 | case NVMET_RDMA_Q_DISCONNECTING: | |
1240 | break; | |
1241 | } | |
1242 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1243 | ||
1244 | if (disconnect) { | |
1245 | rdma_disconnect(queue->cm_id); | |
1246 | ib_drain_qp(queue->cm_id->qp); | |
1247 | schedule_work(&queue->release_work); | |
1248 | } | |
1249 | } | |
1250 | ||
1251 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |
1252 | { | |
1253 | bool disconnect = false; | |
1254 | ||
1255 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1256 | if (!list_empty(&queue->queue_list)) { | |
1257 | list_del_init(&queue->queue_list); | |
1258 | disconnect = true; | |
1259 | } | |
1260 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1261 | ||
1262 | if (disconnect) | |
1263 | __nvmet_rdma_queue_disconnect(queue); | |
1264 | } | |
1265 | ||
1266 | static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, | |
1267 | struct nvmet_rdma_queue *queue) | |
1268 | { | |
1269 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); | |
1270 | ||
1271 | pr_err("failed to connect queue\n"); | |
1272 | schedule_work(&queue->release_work); | |
1273 | } | |
1274 | ||
1275 | static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, | |
1276 | struct rdma_cm_event *event) | |
1277 | { | |
1278 | struct nvmet_rdma_queue *queue = NULL; | |
1279 | int ret = 0; | |
1280 | ||
1281 | if (cm_id->qp) | |
1282 | queue = cm_id->qp->qp_context; | |
1283 | ||
1284 | pr_debug("%s (%d): status %d id %p\n", | |
1285 | rdma_event_msg(event->event), event->event, | |
1286 | event->status, cm_id); | |
1287 | ||
1288 | switch (event->event) { | |
1289 | case RDMA_CM_EVENT_CONNECT_REQUEST: | |
1290 | ret = nvmet_rdma_queue_connect(cm_id, event); | |
1291 | break; | |
1292 | case RDMA_CM_EVENT_ESTABLISHED: | |
1293 | nvmet_rdma_queue_established(queue); | |
1294 | break; | |
1295 | case RDMA_CM_EVENT_ADDR_CHANGE: | |
1296 | case RDMA_CM_EVENT_DISCONNECTED: | |
1297 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | |
1298 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | |
1299 | /* | |
1300 | * We can get the device removal callback even for a | |
1301 | * CM ID that we aren't actually using. In that case | |
1302 | * the context pointer is NULL, so we shouldn't try | |
1303 | * to disconnect a non-existing queue. But we also | |
1304 | * need to return 1 so that the core will destroy | |
1305 | * its own ID. What a great API design.. | |
1306 | */ | |
1307 | if (queue) | |
1308 | nvmet_rdma_queue_disconnect(queue); | |
1309 | else | |
1310 | ret = 1; | |
1311 | break; | |
1312 | case RDMA_CM_EVENT_REJECTED: | |
1313 | case RDMA_CM_EVENT_UNREACHABLE: | |
1314 | case RDMA_CM_EVENT_CONNECT_ERROR: | |
1315 | nvmet_rdma_queue_connect_fail(cm_id, queue); | |
1316 | break; | |
1317 | default: | |
1318 | pr_err("received unrecognized RDMA CM event %d\n", | |
1319 | event->event); | |
1320 | break; | |
1321 | } | |
1322 | ||
1323 | return ret; | |
1324 | } | |
1325 | ||
1326 | static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) | |
1327 | { | |
1328 | struct nvmet_rdma_queue *queue; | |
1329 | ||
1330 | restart: | |
1331 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1332 | list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { | |
1333 | if (queue->nvme_sq.ctrl == ctrl) { | |
1334 | list_del_init(&queue->queue_list); | |
1335 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1336 | ||
1337 | __nvmet_rdma_queue_disconnect(queue); | |
1338 | goto restart; | |
1339 | } | |
1340 | } | |
1341 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1342 | } | |
1343 | ||
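 | /* | |
 | * Port setup: parse the configured IPv4 address and port (trsvcid), | |
 | * bind an RDMA CM ID to it, and listen for incoming NVMe/RDMA | |
 | * connections. | |
 | */ | |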
1344 | static int nvmet_rdma_add_port(struct nvmet_port *port) | |
1345 | { | |
1346 | struct rdma_cm_id *cm_id; | |
1347 | struct sockaddr_in addr_in; | |
1348 | u16 port_in; | |
1349 | int ret; | |
1350 | ||
1351 | switch (port->disc_addr.adrfam) { | |
1352 | case NVMF_ADDR_FAMILY_IP4: | |
1353 | break; | |
1354 | default: | |
1355 | pr_err("address family %d not supported\n", | |
1356 | port->disc_addr.adrfam); | |
1357 | return -EINVAL; | |
1358 | } | |
1359 | ||
1360 | ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in); | |
1361 | if (ret) | |
1362 | return ret; | |
1363 | ||
1364 | addr_in.sin_family = AF_INET; | |
1365 | addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr); | |
1366 | addr_in.sin_port = htons(port_in); | |
1367 | ||
1368 | cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, | |
1369 | RDMA_PS_TCP, IB_QPT_RC); | |
1370 | if (IS_ERR(cm_id)) { | |
1371 | pr_err("CM ID creation failed\n"); | |
1372 | return PTR_ERR(cm_id); | |
1373 | } | |
1374 | ||
1375 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in); | |
1376 | if (ret) { | |
1377 | pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret); | |
1378 | goto out_destroy_id; | |
1379 | } | |
1380 | ||
1381 | ret = rdma_listen(cm_id, 128); | |
1382 | if (ret) { | |
1383 | pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret); | |
1384 | goto out_destroy_id; | |
1385 | } | |
1386 | ||
1387 | pr_info("enabling port %d (%pISpc)\n", | |
1388 | le16_to_cpu(port->disc_addr.portid), &addr_in); | |
1389 | port->priv = cm_id; | |
1390 | return 0; | |
1391 | ||
1392 | out_destroy_id: | |
1393 | rdma_destroy_id(cm_id); | |
1394 | return ret; | |
1395 | } | |
1396 | ||
1397 | static void nvmet_rdma_remove_port(struct nvmet_port *port) | |
1398 | { | |
1399 | struct rdma_cm_id *cm_id = port->priv; | |
1400 | ||
1401 | rdma_destroy_id(cm_id); | |
1402 | } | |
1403 | ||
1404 | static struct nvmet_fabrics_ops nvmet_rdma_ops = { | |
1405 | .owner = THIS_MODULE, | |
1406 | .type = NVMF_TRTYPE_RDMA, | |
1407 | .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE, | |
1408 | .msdbd = 1, | |
1409 | .has_keyed_sgls = 1, | |
1410 | .add_port = nvmet_rdma_add_port, | |
1411 | .remove_port = nvmet_rdma_remove_port, | |
1412 | .queue_response = nvmet_rdma_queue_response, | |
1413 | .delete_ctrl = nvmet_rdma_delete_ctrl, | |
1414 | }; | |
1415 | ||
1416 | static int __init nvmet_rdma_init(void) | |
1417 | { | |
1418 | return nvmet_register_transport(&nvmet_rdma_ops); | |
1419 | } | |
1420 | ||
1421 | static void __exit nvmet_rdma_exit(void) | |
1422 | { | |
1423 | struct nvmet_rdma_queue *queue; | |
1424 | ||
1425 | nvmet_unregister_transport(&nvmet_rdma_ops); | |
1426 | ||
1427 | flush_scheduled_work(); | |
1428 | ||
1429 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1430 | while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list, | |
1431 | struct nvmet_rdma_queue, queue_list))) { | |
1432 | list_del_init(&queue->queue_list); | |
1433 | ||
1434 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1435 | __nvmet_rdma_queue_disconnect(queue); | |
1436 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1437 | } | |
1438 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1439 | ||
1440 | flush_scheduled_work(); | |
1441 | ida_destroy(&nvmet_rdma_queue_ida); | |
1442 | } | |
1443 | ||
1444 | module_init(nvmet_rdma_init); | |
1445 | module_exit(nvmet_rdma_exit); | |
1446 | ||
1447 | MODULE_LICENSE("GPL v2"); | |
1448 | MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ |