Commit | Line | Data |
---|---|---|
a42d985b BVA |
1 | /* |
2 | * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. | |
3 | * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>. | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | * | |
33 | */ | |
34 | ||
35 | #include <linux/module.h> | |
36 | #include <linux/init.h> | |
37 | #include <linux/slab.h> | |
38 | #include <linux/err.h> | |
39 | #include <linux/ctype.h> | |
40 | #include <linux/kthread.h> | |
41 | #include <linux/string.h> | |
42 | #include <linux/delay.h> | |
43 | #include <linux/atomic.h> | |
44 | #include <scsi/scsi_tcq.h> | |
45 | #include <target/configfs_macros.h> | |
46 | #include <target/target_core_base.h> | |
47 | #include <target/target_core_fabric_configfs.h> | |
48 | #include <target/target_core_fabric.h> | |
49 | #include <target/target_core_configfs.h> | |
50 | #include "ib_srpt.h" | |
51 | ||
52 | /* Name of this kernel module. */ | |
53 | #define DRV_NAME "ib_srpt" | |
54 | #define DRV_VERSION "2.0.0" | |
55 | #define DRV_RELDATE "2011-02-14" | |
56 | ||
57 | #define SRPT_ID_STRING "Linux SRP target" | |
58 | ||
59 | #undef pr_fmt | |
60 | #define pr_fmt(fmt) DRV_NAME " " fmt | |
61 | ||
62 | MODULE_AUTHOR("Vu Pham and Bart Van Assche"); | |
63 | MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target " | |
64 | "v" DRV_VERSION " (" DRV_RELDATE ")"); | |
65 | MODULE_LICENSE("Dual BSD/GPL"); | |
66 | ||
67 | /* | |
68 | * Global Variables | |
69 | */ | |
70 | ||
71 | static u64 srpt_service_guid; | |
486d8b9f RD |
72 | static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */ |
73 | static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */ | |
a42d985b BVA |
74 | |
75 | static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE; | |
76 | module_param(srp_max_req_size, int, 0444); | |
77 | MODULE_PARM_DESC(srp_max_req_size, | |
78 | "Maximum size of SRP request messages in bytes."); | |
79 | ||
80 | static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE; | |
81 | module_param(srpt_srq_size, int, 0444); | |
82 | MODULE_PARM_DESC(srpt_srq_size, | |
83 | "Shared receive queue (SRQ) size."); | |
84 | ||
85 | static int srpt_get_u64_x(char *buffer, struct kernel_param *kp) | |
86 | { | |
87 | return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); | |
88 | } | |
89 | module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, | |
90 | 0444); | |
91 | MODULE_PARM_DESC(srpt_service_guid, | |
92 | "Using this value for ioc_guid, id_ext, and cm_listen_id" | |
93 | " instead of using the node_guid of the first HCA."); | |
94 | ||
95 | static struct ib_client srpt_client; | |
96 | static struct target_fabric_configfs *srpt_target; | |
97 | static void srpt_release_channel(struct srpt_rdma_ch *ch); | |
98 | static int srpt_queue_status(struct se_cmd *cmd); | |
99 | ||
100 | /** | |
101 | * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE. | |
102 | */ | |
103 | static inline | |
104 | enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir) | |
105 | { | |
106 | switch (dir) { | |
107 | case DMA_TO_DEVICE: return DMA_FROM_DEVICE; | |
108 | case DMA_FROM_DEVICE: return DMA_TO_DEVICE; | |
109 | default: return dir; | |
110 | } | |
111 | } | |
112 | ||
113 | /** | |
114 | * srpt_sdev_name() - Return the name associated with the HCA. | |
115 | * | |
116 | * Examples are ib0, ib1, ... | |
117 | */ | |
118 | static inline const char *srpt_sdev_name(struct srpt_device *sdev) | |
119 | { | |
120 | return sdev->device->name; | |
121 | } | |
122 | ||
123 | static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch) | |
124 | { | |
125 | unsigned long flags; | |
126 | enum rdma_ch_state state; | |
127 | ||
128 | spin_lock_irqsave(&ch->spinlock, flags); | |
129 | state = ch->state; | |
130 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
131 | return state; | |
132 | } | |
133 | ||
134 | static enum rdma_ch_state | |
135 | srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state) | |
136 | { | |
137 | unsigned long flags; | |
138 | enum rdma_ch_state prev; | |
139 | ||
140 | spin_lock_irqsave(&ch->spinlock, flags); | |
141 | prev = ch->state; | |
142 | ch->state = new_state; | |
143 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
144 | return prev; | |
145 | } | |
146 | ||
147 | /** | |
148 | * srpt_test_and_set_ch_state() - Test and set the channel state. | |
149 | * | |
150 | * Returns true if and only if the channel state has been set to the new state. | |
151 | */ | |
152 | static bool | |
153 | srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old, | |
154 | enum rdma_ch_state new) | |
155 | { | |
156 | unsigned long flags; | |
157 | enum rdma_ch_state prev; | |
158 | ||
159 | spin_lock_irqsave(&ch->spinlock, flags); | |
160 | prev = ch->state; | |
161 | if (prev == old) | |
162 | ch->state = new; | |
163 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
164 | return prev == old; | |
165 | } | |
166 | ||
167 | /** | |
168 | * srpt_event_handler() - Asynchronous IB event callback function. | |
169 | * | |
170 | * Callback function called by the InfiniBand core when an asynchronous IB | |
171 | * event occurs. This callback may occur in interrupt context. See also | |
172 | * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand | |
173 | * Architecture Specification. | |
174 | */ | |
175 | static void srpt_event_handler(struct ib_event_handler *handler, | |
176 | struct ib_event *event) | |
177 | { | |
178 | struct srpt_device *sdev; | |
179 | struct srpt_port *sport; | |
180 | ||
181 | sdev = ib_get_client_data(event->device, &srpt_client); | |
182 | if (!sdev || sdev->device != event->device) | |
183 | return; | |
184 | ||
185 | pr_debug("ASYNC event= %d on device= %s\n", event->event, | |
186 | srpt_sdev_name(sdev)); | |
187 | ||
188 | switch (event->event) { | |
189 | case IB_EVENT_PORT_ERR: | |
190 | if (event->element.port_num <= sdev->device->phys_port_cnt) { | |
191 | sport = &sdev->port[event->element.port_num - 1]; | |
192 | sport->lid = 0; | |
193 | sport->sm_lid = 0; | |
194 | } | |
195 | break; | |
196 | case IB_EVENT_PORT_ACTIVE: | |
197 | case IB_EVENT_LID_CHANGE: | |
198 | case IB_EVENT_PKEY_CHANGE: | |
199 | case IB_EVENT_SM_CHANGE: | |
200 | case IB_EVENT_CLIENT_REREGISTER: | |
201 | /* Refresh port data asynchronously. */ | |
202 | if (event->element.port_num <= sdev->device->phys_port_cnt) { | |
203 | sport = &sdev->port[event->element.port_num - 1]; | |
204 | if (!sport->lid && !sport->sm_lid) | |
205 | schedule_work(&sport->work); | |
206 | } | |
207 | break; | |
208 | default: | |
209 | printk(KERN_ERR "received unrecognized IB event %d\n", | |
210 | event->event); | |
211 | break; | |
212 | } | |
213 | } | |
214 | ||
215 | /** | |
216 | * srpt_srq_event() - SRQ event callback function. | |
217 | */ | |
218 | static void srpt_srq_event(struct ib_event *event, void *ctx) | |
219 | { | |
220 | printk(KERN_INFO "SRQ event %d\n", event->event); | |
221 | } | |
222 | ||
223 | /** | |
224 | * srpt_qp_event() - QP event callback function. | |
225 | */ | |
226 | static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) | |
227 | { | |
228 | pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n", | |
229 | event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch)); | |
230 | ||
231 | switch (event->event) { | |
232 | case IB_EVENT_COMM_EST: | |
233 | ib_cm_notify(ch->cm_id, event->event); | |
234 | break; | |
235 | case IB_EVENT_QP_LAST_WQE_REACHED: | |
236 | if (srpt_test_and_set_ch_state(ch, CH_DRAINING, | |
237 | CH_RELEASING)) | |
238 | srpt_release_channel(ch); | |
239 | else | |
240 | pr_debug("%s: state %d - ignored LAST_WQE.\n", | |
241 | ch->sess_name, srpt_get_ch_state(ch)); | |
242 | break; | |
243 | default: | |
244 | printk(KERN_ERR "received unrecognized IB QP event %d\n", | |
245 | event->event); | |
246 | break; | |
247 | } | |
248 | } | |
249 | ||
250 | /** | |
251 | * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure. | |
252 | * | |
253 | * @slot: one-based slot number. | |
254 | * @value: four-bit value. | |
255 | * | |
256 | * Copies the lowest four bits of value in element slot of the array of four | |
257 | * bit elements called c_list (controller list). The index slot is one-based. | |
258 | */ | |
259 | static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value) | |
260 | { | |
261 | u16 id; | |
262 | u8 tmp; | |
263 | ||
264 | id = (slot - 1) / 2; | |
265 | if (slot & 0x1) { | |
266 | tmp = c_list[id] & 0xf; | |
267 | c_list[id] = (value << 4) | tmp; | |
268 | } else { | |
269 | tmp = c_list[id] & 0xf0; | |
270 | c_list[id] = (value & 0xf) | tmp; | |
271 | } | |
272 | } | |
273 | ||
274 | /** | |
275 | * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram. | |
276 | * | |
277 | * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture | |
278 | * Specification. | |
279 | */ | |
280 | static void srpt_get_class_port_info(struct ib_dm_mad *mad) | |
281 | { | |
282 | struct ib_class_port_info *cif; | |
283 | ||
284 | cif = (struct ib_class_port_info *)mad->data; | |
285 | memset(cif, 0, sizeof *cif); | |
286 | cif->base_version = 1; | |
287 | cif->class_version = 1; | |
288 | cif->resp_time_value = 20; | |
289 | ||
290 | mad->mad_hdr.status = 0; | |
291 | } | |
292 | ||
293 | /** | |
294 | * srpt_get_iou() - Write IOUnitInfo to a management datagram. | |
295 | * | |
296 | * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture | |
297 | * Specification. See also section B.7, table B.6 in the SRP r16a document. | |
298 | */ | |
299 | static void srpt_get_iou(struct ib_dm_mad *mad) | |
300 | { | |
301 | struct ib_dm_iou_info *ioui; | |
302 | u8 slot; | |
303 | int i; | |
304 | ||
305 | ioui = (struct ib_dm_iou_info *)mad->data; | |
306 | ioui->change_id = __constant_cpu_to_be16(1); | |
307 | ioui->max_controllers = 16; | |
308 | ||
309 | /* set present for slot 1 and empty for the rest */ | |
310 | srpt_set_ioc(ioui->controller_list, 1, 1); | |
311 | for (i = 1, slot = 2; i < 16; i++, slot++) | |
312 | srpt_set_ioc(ioui->controller_list, slot, 0); | |
313 | ||
314 | mad->mad_hdr.status = 0; | |
315 | } | |
316 | ||
317 | /** | |
318 | * srpt_get_ioc() - Write IOControllerprofile to a management datagram. | |
319 | * | |
320 | * See also section 16.3.3.4 IOControllerProfile in the InfiniBand | |
321 | * Architecture Specification. See also section B.7, table B.7 in the SRP | |
322 | * r16a document. | |
323 | */ | |
324 | static void srpt_get_ioc(struct srpt_port *sport, u32 slot, | |
325 | struct ib_dm_mad *mad) | |
326 | { | |
327 | struct srpt_device *sdev = sport->sdev; | |
328 | struct ib_dm_ioc_profile *iocp; | |
329 | ||
330 | iocp = (struct ib_dm_ioc_profile *)mad->data; | |
331 | ||
332 | if (!slot || slot > 16) { | |
333 | mad->mad_hdr.status | |
334 | = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); | |
335 | return; | |
336 | } | |
337 | ||
338 | if (slot > 2) { | |
339 | mad->mad_hdr.status | |
340 | = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); | |
341 | return; | |
342 | } | |
343 | ||
344 | memset(iocp, 0, sizeof *iocp); | |
345 | strcpy(iocp->id_string, SRPT_ID_STRING); | |
346 | iocp->guid = cpu_to_be64(srpt_service_guid); | |
347 | iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); | |
348 | iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id); | |
349 | iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); | |
350 | iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); | |
351 | iocp->subsys_device_id = 0x0; | |
352 | iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); | |
353 | iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); | |
354 | iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); | |
355 | iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); | |
356 | iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); | |
357 | iocp->rdma_read_depth = 4; | |
358 | iocp->send_size = cpu_to_be32(srp_max_req_size); | |
359 | iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size, | |
360 | 1U << 24)); | |
361 | iocp->num_svc_entries = 1; | |
362 | iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC | | |
363 | SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC; | |
364 | ||
365 | mad->mad_hdr.status = 0; | |
366 | } | |
367 | ||
368 | /** | |
369 | * srpt_get_svc_entries() - Write ServiceEntries to a management datagram. | |
370 | * | |
371 | * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture | |
372 | * Specification. See also section B.7, table B.8 in the SRP r16a document. | |
373 | */ | |
374 | static void srpt_get_svc_entries(u64 ioc_guid, | |
375 | u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad) | |
376 | { | |
377 | struct ib_dm_svc_entries *svc_entries; | |
378 | ||
379 | WARN_ON(!ioc_guid); | |
380 | ||
381 | if (!slot || slot > 16) { | |
382 | mad->mad_hdr.status | |
383 | = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); | |
384 | return; | |
385 | } | |
386 | ||
387 | if (slot > 2 || lo > hi || hi > 1) { | |
388 | mad->mad_hdr.status | |
389 | = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); | |
390 | return; | |
391 | } | |
392 | ||
393 | svc_entries = (struct ib_dm_svc_entries *)mad->data; | |
394 | memset(svc_entries, 0, sizeof *svc_entries); | |
395 | svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid); | |
396 | snprintf(svc_entries->service_entries[0].name, | |
397 | sizeof(svc_entries->service_entries[0].name), | |
398 | "%s%016llx", | |
399 | SRP_SERVICE_NAME_PREFIX, | |
400 | ioc_guid); | |
401 | ||
402 | mad->mad_hdr.status = 0; | |
403 | } | |
404 | ||
405 | /** | |
406 | * srpt_mgmt_method_get() - Process a received management datagram. | |
407 | * @sp: source port through which the MAD has been received. | |
408 | * @rq_mad: received MAD. | |
409 | * @rsp_mad: response MAD. | |
410 | */ | |
411 | static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, | |
412 | struct ib_dm_mad *rsp_mad) | |
413 | { | |
414 | u16 attr_id; | |
415 | u32 slot; | |
416 | u8 hi, lo; | |
417 | ||
418 | attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id); | |
419 | switch (attr_id) { | |
420 | case DM_ATTR_CLASS_PORT_INFO: | |
421 | srpt_get_class_port_info(rsp_mad); | |
422 | break; | |
423 | case DM_ATTR_IOU_INFO: | |
424 | srpt_get_iou(rsp_mad); | |
425 | break; | |
426 | case DM_ATTR_IOC_PROFILE: | |
427 | slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); | |
428 | srpt_get_ioc(sp, slot, rsp_mad); | |
429 | break; | |
430 | case DM_ATTR_SVC_ENTRIES: | |
431 | slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); | |
432 | hi = (u8) ((slot >> 8) & 0xff); | |
433 | lo = (u8) (slot & 0xff); | |
434 | slot = (u16) ((slot >> 16) & 0xffff); | |
435 | srpt_get_svc_entries(srpt_service_guid, | |
436 | slot, hi, lo, rsp_mad); | |
437 | break; | |
438 | default: | |
439 | rsp_mad->mad_hdr.status = | |
440 | __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); | |
441 | break; | |
442 | } | |
443 | } | |
444 | ||
445 | /** | |
446 | * srpt_mad_send_handler() - Post MAD-send callback function. | |
447 | */ | |
448 | static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent, | |
449 | struct ib_mad_send_wc *mad_wc) | |
450 | { | |
451 | ib_destroy_ah(mad_wc->send_buf->ah); | |
452 | ib_free_send_mad(mad_wc->send_buf); | |
453 | } | |
454 | ||
455 | /** | |
456 | * srpt_mad_recv_handler() - MAD reception callback function. | |
457 | */ | |
458 | static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, | |
459 | struct ib_mad_recv_wc *mad_wc) | |
460 | { | |
461 | struct srpt_port *sport = (struct srpt_port *)mad_agent->context; | |
462 | struct ib_ah *ah; | |
463 | struct ib_mad_send_buf *rsp; | |
464 | struct ib_dm_mad *dm_mad; | |
465 | ||
466 | if (!mad_wc || !mad_wc->recv_buf.mad) | |
467 | return; | |
468 | ||
469 | ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, | |
470 | mad_wc->recv_buf.grh, mad_agent->port_num); | |
471 | if (IS_ERR(ah)) | |
472 | goto err; | |
473 | ||
474 | BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR); | |
475 | ||
476 | rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, | |
477 | mad_wc->wc->pkey_index, 0, | |
478 | IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, | |
479 | GFP_KERNEL); | |
480 | if (IS_ERR(rsp)) | |
481 | goto err_rsp; | |
482 | ||
483 | rsp->ah = ah; | |
484 | ||
485 | dm_mad = rsp->mad; | |
486 | memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad); | |
487 | dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | |
488 | dm_mad->mad_hdr.status = 0; | |
489 | ||
490 | switch (mad_wc->recv_buf.mad->mad_hdr.method) { | |
491 | case IB_MGMT_METHOD_GET: | |
492 | srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad); | |
493 | break; | |
494 | case IB_MGMT_METHOD_SET: | |
495 | dm_mad->mad_hdr.status = | |
496 | __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); | |
497 | break; | |
498 | default: | |
499 | dm_mad->mad_hdr.status = | |
500 | __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); | |
501 | break; | |
502 | } | |
503 | ||
504 | if (!ib_post_send_mad(rsp, NULL)) { | |
505 | ib_free_recv_mad(mad_wc); | |
506 | /* will destroy_ah & free_send_mad in send completion */ | |
507 | return; | |
508 | } | |
509 | ||
510 | ib_free_send_mad(rsp); | |
511 | ||
512 | err_rsp: | |
513 | ib_destroy_ah(ah); | |
514 | err: | |
515 | ib_free_recv_mad(mad_wc); | |
516 | } | |
517 | ||
518 | /** | |
519 | * srpt_refresh_port() - Configure a HCA port. | |
520 | * | |
521 | * Enable InfiniBand management datagram processing, update the cached sm_lid, | |
522 | * lid and gid values, and register a callback function for processing MADs | |
523 | * on the specified port. | |
524 | * | |
525 | * Note: It is safe to call this function more than once for the same port. | |
526 | */ | |
527 | static int srpt_refresh_port(struct srpt_port *sport) | |
528 | { | |
529 | struct ib_mad_reg_req reg_req; | |
530 | struct ib_port_modify port_modify; | |
531 | struct ib_port_attr port_attr; | |
532 | int ret; | |
533 | ||
534 | memset(&port_modify, 0, sizeof port_modify); | |
535 | port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; | |
536 | port_modify.clr_port_cap_mask = 0; | |
537 | ||
538 | ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); | |
539 | if (ret) | |
540 | goto err_mod_port; | |
541 | ||
542 | ret = ib_query_port(sport->sdev->device, sport->port, &port_attr); | |
543 | if (ret) | |
544 | goto err_query_port; | |
545 | ||
546 | sport->sm_lid = port_attr.sm_lid; | |
547 | sport->lid = port_attr.lid; | |
548 | ||
549 | ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid); | |
550 | if (ret) | |
551 | goto err_query_port; | |
552 | ||
553 | if (!sport->mad_agent) { | |
554 | memset(®_req, 0, sizeof reg_req); | |
555 | reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; | |
556 | reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION; | |
557 | set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask); | |
558 | set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask); | |
559 | ||
560 | sport->mad_agent = ib_register_mad_agent(sport->sdev->device, | |
561 | sport->port, | |
562 | IB_QPT_GSI, | |
563 | ®_req, 0, | |
564 | srpt_mad_send_handler, | |
565 | srpt_mad_recv_handler, | |
566 | sport); | |
567 | if (IS_ERR(sport->mad_agent)) { | |
568 | ret = PTR_ERR(sport->mad_agent); | |
569 | sport->mad_agent = NULL; | |
570 | goto err_query_port; | |
571 | } | |
572 | } | |
573 | ||
574 | return 0; | |
575 | ||
576 | err_query_port: | |
577 | ||
578 | port_modify.set_port_cap_mask = 0; | |
579 | port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; | |
580 | ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); | |
581 | ||
582 | err_mod_port: | |
583 | ||
584 | return ret; | |
585 | } | |
586 | ||
587 | /** | |
588 | * srpt_unregister_mad_agent() - Unregister MAD callback functions. | |
589 | * | |
590 | * Note: It is safe to call this function more than once for the same device. | |
591 | */ | |
592 | static void srpt_unregister_mad_agent(struct srpt_device *sdev) | |
593 | { | |
594 | struct ib_port_modify port_modify = { | |
595 | .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP, | |
596 | }; | |
597 | struct srpt_port *sport; | |
598 | int i; | |
599 | ||
600 | for (i = 1; i <= sdev->device->phys_port_cnt; i++) { | |
601 | sport = &sdev->port[i - 1]; | |
602 | WARN_ON(sport->port != i); | |
603 | if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0) | |
604 | printk(KERN_ERR "disabling MAD processing failed.\n"); | |
605 | if (sport->mad_agent) { | |
606 | ib_unregister_mad_agent(sport->mad_agent); | |
607 | sport->mad_agent = NULL; | |
608 | } | |
609 | } | |
610 | } | |
611 | ||
612 | /** | |
613 | * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure. | |
614 | */ | |
615 | static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev, | |
616 | int ioctx_size, int dma_size, | |
617 | enum dma_data_direction dir) | |
618 | { | |
619 | struct srpt_ioctx *ioctx; | |
620 | ||
621 | ioctx = kmalloc(ioctx_size, GFP_KERNEL); | |
622 | if (!ioctx) | |
623 | goto err; | |
624 | ||
625 | ioctx->buf = kmalloc(dma_size, GFP_KERNEL); | |
626 | if (!ioctx->buf) | |
627 | goto err_free_ioctx; | |
628 | ||
629 | ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir); | |
630 | if (ib_dma_mapping_error(sdev->device, ioctx->dma)) | |
631 | goto err_free_buf; | |
632 | ||
633 | return ioctx; | |
634 | ||
635 | err_free_buf: | |
636 | kfree(ioctx->buf); | |
637 | err_free_ioctx: | |
638 | kfree(ioctx); | |
639 | err: | |
640 | return NULL; | |
641 | } | |
642 | ||
643 | /** | |
644 | * srpt_free_ioctx() - Free an SRPT I/O context structure. | |
645 | */ | |
646 | static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx, | |
647 | int dma_size, enum dma_data_direction dir) | |
648 | { | |
649 | if (!ioctx) | |
650 | return; | |
651 | ||
652 | ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir); | |
653 | kfree(ioctx->buf); | |
654 | kfree(ioctx); | |
655 | } | |
656 | ||
657 | /** | |
658 | * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures. | |
659 | * @sdev: Device to allocate the I/O context ring for. | |
660 | * @ring_size: Number of elements in the I/O context ring. | |
661 | * @ioctx_size: I/O context size. | |
662 | * @dma_size: DMA buffer size. | |
663 | * @dir: DMA data direction. | |
664 | */ | |
665 | static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev, | |
666 | int ring_size, int ioctx_size, | |
667 | int dma_size, enum dma_data_direction dir) | |
668 | { | |
669 | struct srpt_ioctx **ring; | |
670 | int i; | |
671 | ||
672 | WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) | |
673 | && ioctx_size != sizeof(struct srpt_send_ioctx)); | |
674 | ||
675 | ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL); | |
676 | if (!ring) | |
677 | goto out; | |
678 | for (i = 0; i < ring_size; ++i) { | |
679 | ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir); | |
680 | if (!ring[i]) | |
681 | goto err; | |
682 | ring[i]->index = i; | |
683 | } | |
684 | goto out; | |
685 | ||
686 | err: | |
687 | while (--i >= 0) | |
688 | srpt_free_ioctx(sdev, ring[i], dma_size, dir); | |
689 | kfree(ring); | |
715252d4 | 690 | ring = NULL; |
a42d985b BVA |
691 | out: |
692 | return ring; | |
693 | } | |
694 | ||
695 | /** | |
696 | * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures. | |
697 | */ | |
698 | static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring, | |
699 | struct srpt_device *sdev, int ring_size, | |
700 | int dma_size, enum dma_data_direction dir) | |
701 | { | |
702 | int i; | |
703 | ||
704 | for (i = 0; i < ring_size; ++i) | |
705 | srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir); | |
706 | kfree(ioctx_ring); | |
707 | } | |
708 | ||
709 | /** | |
710 | * srpt_get_cmd_state() - Get the state of a SCSI command. | |
711 | */ | |
712 | static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx) | |
713 | { | |
714 | enum srpt_command_state state; | |
715 | unsigned long flags; | |
716 | ||
717 | BUG_ON(!ioctx); | |
718 | ||
719 | spin_lock_irqsave(&ioctx->spinlock, flags); | |
720 | state = ioctx->state; | |
721 | spin_unlock_irqrestore(&ioctx->spinlock, flags); | |
722 | return state; | |
723 | } | |
724 | ||
725 | /** | |
726 | * srpt_set_cmd_state() - Set the state of a SCSI command. | |
727 | * | |
728 | * Does not modify the state of aborted commands. Returns the previous command | |
729 | * state. | |
730 | */ | |
731 | static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx, | |
732 | enum srpt_command_state new) | |
733 | { | |
734 | enum srpt_command_state previous; | |
735 | unsigned long flags; | |
736 | ||
737 | BUG_ON(!ioctx); | |
738 | ||
739 | spin_lock_irqsave(&ioctx->spinlock, flags); | |
740 | previous = ioctx->state; | |
741 | if (previous != SRPT_STATE_DONE) | |
742 | ioctx->state = new; | |
743 | spin_unlock_irqrestore(&ioctx->spinlock, flags); | |
744 | ||
745 | return previous; | |
746 | } | |
747 | ||
748 | /** | |
749 | * srpt_test_and_set_cmd_state() - Test and set the state of a command. | |
750 | * | |
751 | * Returns true if and only if the previous command state was equal to 'old'. | |
752 | */ | |
753 | static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx, | |
754 | enum srpt_command_state old, | |
755 | enum srpt_command_state new) | |
756 | { | |
757 | enum srpt_command_state previous; | |
758 | unsigned long flags; | |
759 | ||
760 | WARN_ON(!ioctx); | |
761 | WARN_ON(old == SRPT_STATE_DONE); | |
762 | WARN_ON(new == SRPT_STATE_NEW); | |
763 | ||
764 | spin_lock_irqsave(&ioctx->spinlock, flags); | |
765 | previous = ioctx->state; | |
766 | if (previous == old) | |
767 | ioctx->state = new; | |
768 | spin_unlock_irqrestore(&ioctx->spinlock, flags); | |
769 | return previous == old; | |
770 | } | |
771 | ||
772 | /** | |
773 | * srpt_post_recv() - Post an IB receive request. | |
774 | */ | |
775 | static int srpt_post_recv(struct srpt_device *sdev, | |
776 | struct srpt_recv_ioctx *ioctx) | |
777 | { | |
778 | struct ib_sge list; | |
779 | struct ib_recv_wr wr, *bad_wr; | |
780 | ||
781 | BUG_ON(!sdev); | |
782 | wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index); | |
783 | ||
784 | list.addr = ioctx->ioctx.dma; | |
785 | list.length = srp_max_req_size; | |
786 | list.lkey = sdev->mr->lkey; | |
787 | ||
788 | wr.next = NULL; | |
789 | wr.sg_list = &list; | |
790 | wr.num_sge = 1; | |
791 | ||
792 | return ib_post_srq_recv(sdev->srq, &wr, &bad_wr); | |
793 | } | |
794 | ||
795 | /** | |
796 | * srpt_post_send() - Post an IB send request. | |
797 | * | |
798 | * Returns zero upon success and a non-zero value upon failure. | |
799 | */ | |
800 | static int srpt_post_send(struct srpt_rdma_ch *ch, | |
801 | struct srpt_send_ioctx *ioctx, int len) | |
802 | { | |
803 | struct ib_sge list; | |
804 | struct ib_send_wr wr, *bad_wr; | |
805 | struct srpt_device *sdev = ch->sport->sdev; | |
806 | int ret; | |
807 | ||
808 | atomic_inc(&ch->req_lim); | |
809 | ||
810 | ret = -ENOMEM; | |
811 | if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) { | |
812 | printk(KERN_WARNING "IB send queue full (needed 1)\n"); | |
813 | goto out; | |
814 | } | |
815 | ||
816 | ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len, | |
817 | DMA_TO_DEVICE); | |
818 | ||
819 | list.addr = ioctx->ioctx.dma; | |
820 | list.length = len; | |
821 | list.lkey = sdev->mr->lkey; | |
822 | ||
823 | wr.next = NULL; | |
824 | wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index); | |
825 | wr.sg_list = &list; | |
826 | wr.num_sge = 1; | |
827 | wr.opcode = IB_WR_SEND; | |
828 | wr.send_flags = IB_SEND_SIGNALED; | |
829 | ||
830 | ret = ib_post_send(ch->qp, &wr, &bad_wr); | |
831 | ||
832 | out: | |
833 | if (ret < 0) { | |
834 | atomic_inc(&ch->sq_wr_avail); | |
835 | atomic_dec(&ch->req_lim); | |
836 | } | |
837 | return ret; | |
838 | } | |
839 | ||
840 | /** | |
841 | * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request. | |
842 | * @ioctx: Pointer to the I/O context associated with the request. | |
843 | * @srp_cmd: Pointer to the SRP_CMD request data. | |
844 | * @dir: Pointer to the variable to which the transfer direction will be | |
845 | * written. | |
846 | * @data_len: Pointer to the variable to which the total data length of all | |
847 | * descriptors in the SRP_CMD request will be written. | |
848 | * | |
849 | * This function initializes ioctx->nrbuf and ioctx->r_bufs. | |
850 | * | |
851 | * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors; | |
852 | * -ENOMEM when memory allocation fails and zero upon success. | |
853 | */ | |
854 | static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx, | |
855 | struct srp_cmd *srp_cmd, | |
856 | enum dma_data_direction *dir, u64 *data_len) | |
857 | { | |
858 | struct srp_indirect_buf *idb; | |
859 | struct srp_direct_buf *db; | |
860 | unsigned add_cdb_offset; | |
861 | int ret; | |
862 | ||
863 | /* | |
864 | * The pointer computations below will only be compiled correctly | |
865 | * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check | |
866 | * whether srp_cmd::add_data has been declared as a byte pointer. | |
867 | */ | |
868 | BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) | |
869 | && !__same_type(srp_cmd->add_data[0], (u8)0)); | |
870 | ||
871 | BUG_ON(!dir); | |
872 | BUG_ON(!data_len); | |
873 | ||
874 | ret = 0; | |
875 | *data_len = 0; | |
876 | ||
877 | /* | |
878 | * The lower four bits of the buffer format field contain the DATA-IN | |
879 | * buffer descriptor format, and the highest four bits contain the | |
880 | * DATA-OUT buffer descriptor format. | |
881 | */ | |
882 | *dir = DMA_NONE; | |
883 | if (srp_cmd->buf_fmt & 0xf) | |
884 | /* DATA-IN: transfer data from target to initiator (read). */ | |
885 | *dir = DMA_FROM_DEVICE; | |
886 | else if (srp_cmd->buf_fmt >> 4) | |
887 | /* DATA-OUT: transfer data from initiator to target (write). */ | |
888 | *dir = DMA_TO_DEVICE; | |
889 | ||
890 | /* | |
891 | * According to the SRP spec, the lower two bits of the 'ADDITIONAL | |
892 | * CDB LENGTH' field are reserved and the size in bytes of this field | |
893 | * is four times the value specified in bits 3..7. Hence the "& ~3". | |
894 | */ | |
895 | add_cdb_offset = srp_cmd->add_cdb_len & ~3; | |
896 | if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || | |
897 | ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { | |
898 | ioctx->n_rbuf = 1; | |
899 | ioctx->rbufs = &ioctx->single_rbuf; | |
900 | ||
901 | db = (struct srp_direct_buf *)(srp_cmd->add_data | |
902 | + add_cdb_offset); | |
903 | memcpy(ioctx->rbufs, db, sizeof *db); | |
904 | *data_len = be32_to_cpu(db->len); | |
905 | } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || | |
906 | ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { | |
907 | idb = (struct srp_indirect_buf *)(srp_cmd->add_data | |
908 | + add_cdb_offset); | |
909 | ||
910 | ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db; | |
911 | ||
912 | if (ioctx->n_rbuf > | |
913 | (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) { | |
914 | printk(KERN_ERR "received unsupported SRP_CMD request" | |
915 | " type (%u out + %u in != %u / %zu)\n", | |
916 | srp_cmd->data_out_desc_cnt, | |
917 | srp_cmd->data_in_desc_cnt, | |
918 | be32_to_cpu(idb->table_desc.len), | |
919 | sizeof(*db)); | |
920 | ioctx->n_rbuf = 0; | |
921 | ret = -EINVAL; | |
922 | goto out; | |
923 | } | |
924 | ||
925 | if (ioctx->n_rbuf == 1) | |
926 | ioctx->rbufs = &ioctx->single_rbuf; | |
927 | else { | |
928 | ioctx->rbufs = | |
929 | kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC); | |
930 | if (!ioctx->rbufs) { | |
931 | ioctx->n_rbuf = 0; | |
932 | ret = -ENOMEM; | |
933 | goto out; | |
934 | } | |
935 | } | |
936 | ||
937 | db = idb->desc_list; | |
938 | memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db); | |
939 | *data_len = be32_to_cpu(idb->len); | |
940 | } | |
941 | out: | |
942 | return ret; | |
943 | } | |
944 | ||
945 | /** | |
946 | * srpt_init_ch_qp() - Initialize queue pair attributes. | |
947 | * | |
948 | * Initialized the attributes of queue pair 'qp' by allowing local write, | |
949 | * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT. | |
950 | */ | |
951 | static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) | |
952 | { | |
953 | struct ib_qp_attr *attr; | |
954 | int ret; | |
955 | ||
956 | attr = kzalloc(sizeof *attr, GFP_KERNEL); | |
957 | if (!attr) | |
958 | return -ENOMEM; | |
959 | ||
960 | attr->qp_state = IB_QPS_INIT; | |
961 | attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | | |
962 | IB_ACCESS_REMOTE_WRITE; | |
963 | attr->port_num = ch->sport->port; | |
964 | attr->pkey_index = 0; | |
965 | ||
966 | ret = ib_modify_qp(qp, attr, | |
967 | IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT | | |
968 | IB_QP_PKEY_INDEX); | |
969 | ||
970 | kfree(attr); | |
971 | return ret; | |
972 | } | |
973 | ||
974 | /** | |
975 | * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR). | |
976 | * @ch: channel of the queue pair. | |
977 | * @qp: queue pair to change the state of. | |
978 | * | |
979 | * Returns zero upon success and a negative value upon failure. | |
980 | * | |
981 | * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. | |
982 | * If this structure ever becomes larger, it might be necessary to allocate | |
983 | * it dynamically instead of on the stack. | |
984 | */ | |
985 | static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) | |
986 | { | |
987 | struct ib_qp_attr qp_attr; | |
988 | int attr_mask; | |
989 | int ret; | |
990 | ||
991 | qp_attr.qp_state = IB_QPS_RTR; | |
992 | ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); | |
993 | if (ret) | |
994 | goto out; | |
995 | ||
996 | qp_attr.max_dest_rd_atomic = 4; | |
997 | ||
998 | ret = ib_modify_qp(qp, &qp_attr, attr_mask); | |
999 | ||
1000 | out: | |
1001 | return ret; | |
1002 | } | |
1003 | ||
1004 | /** | |
1005 | * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS). | |
1006 | * @ch: channel of the queue pair. | |
1007 | * @qp: queue pair to change the state of. | |
1008 | * | |
1009 | * Returns zero upon success and a negative value upon failure. | |
1010 | * | |
1011 | * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. | |
1012 | * If this structure ever becomes larger, it might be necessary to allocate | |
1013 | * it dynamically instead of on the stack. | |
1014 | */ | |
1015 | static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) | |
1016 | { | |
1017 | struct ib_qp_attr qp_attr; | |
1018 | int attr_mask; | |
1019 | int ret; | |
1020 | ||
1021 | qp_attr.qp_state = IB_QPS_RTS; | |
1022 | ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); | |
1023 | if (ret) | |
1024 | goto out; | |
1025 | ||
1026 | qp_attr.max_rd_atomic = 4; | |
1027 | ||
1028 | ret = ib_modify_qp(qp, &qp_attr, attr_mask); | |
1029 | ||
1030 | out: | |
1031 | return ret; | |
1032 | } | |
1033 | ||
1034 | /** | |
1035 | * srpt_ch_qp_err() - Set the channel queue pair state to 'error'. | |
1036 | */ | |
1037 | static int srpt_ch_qp_err(struct srpt_rdma_ch *ch) | |
1038 | { | |
1039 | struct ib_qp_attr qp_attr; | |
1040 | ||
1041 | qp_attr.qp_state = IB_QPS_ERR; | |
1042 | return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); | |
1043 | } | |
1044 | ||
1045 | /** | |
1046 | * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list. | |
1047 | */ | |
1048 | static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch, | |
1049 | struct srpt_send_ioctx *ioctx) | |
1050 | { | |
1051 | struct scatterlist *sg; | |
1052 | enum dma_data_direction dir; | |
1053 | ||
1054 | BUG_ON(!ch); | |
1055 | BUG_ON(!ioctx); | |
1056 | BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius); | |
1057 | ||
1058 | while (ioctx->n_rdma) | |
1059 | kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge); | |
1060 | ||
1061 | kfree(ioctx->rdma_ius); | |
1062 | ioctx->rdma_ius = NULL; | |
1063 | ||
1064 | if (ioctx->mapped_sg_count) { | |
1065 | sg = ioctx->sg; | |
1066 | WARN_ON(!sg); | |
1067 | dir = ioctx->cmd.data_direction; | |
1068 | BUG_ON(dir == DMA_NONE); | |
1069 | ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt, | |
1070 | opposite_dma_dir(dir)); | |
1071 | ioctx->mapped_sg_count = 0; | |
1072 | } | |
1073 | } | |
1074 | ||
1075 | /** | |
1076 | * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list. | |
1077 | */ | |
1078 | static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, | |
1079 | struct srpt_send_ioctx *ioctx) | |
1080 | { | |
1081 | struct se_cmd *cmd; | |
1082 | struct scatterlist *sg, *sg_orig; | |
1083 | int sg_cnt; | |
1084 | enum dma_data_direction dir; | |
1085 | struct rdma_iu *riu; | |
1086 | struct srp_direct_buf *db; | |
1087 | dma_addr_t dma_addr; | |
1088 | struct ib_sge *sge; | |
1089 | u64 raddr; | |
1090 | u32 rsize; | |
1091 | u32 tsize; | |
1092 | u32 dma_len; | |
1093 | int count, nrdma; | |
1094 | int i, j, k; | |
1095 | ||
1096 | BUG_ON(!ch); | |
1097 | BUG_ON(!ioctx); | |
1098 | cmd = &ioctx->cmd; | |
1099 | dir = cmd->data_direction; | |
1100 | BUG_ON(dir == DMA_NONE); | |
1101 | ||
6f9e7f01 RD |
1102 | ioctx->sg = sg = sg_orig = cmd->t_data_sg; |
1103 | ioctx->sg_cnt = sg_cnt = cmd->t_data_nents; | |
a42d985b BVA |
1104 | |
1105 | count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt, | |
1106 | opposite_dma_dir(dir)); | |
1107 | if (unlikely(!count)) | |
1108 | return -EAGAIN; | |
1109 | ||
1110 | ioctx->mapped_sg_count = count; | |
1111 | ||
1112 | if (ioctx->rdma_ius && ioctx->n_rdma_ius) | |
1113 | nrdma = ioctx->n_rdma_ius; | |
1114 | else { | |
1115 | nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE | |
1116 | + ioctx->n_rbuf; | |
1117 | ||
1118 | ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL); | |
1119 | if (!ioctx->rdma_ius) | |
1120 | goto free_mem; | |
1121 | ||
1122 | ioctx->n_rdma_ius = nrdma; | |
1123 | } | |
1124 | ||
1125 | db = ioctx->rbufs; | |
1126 | tsize = cmd->data_length; | |
1127 | dma_len = sg_dma_len(&sg[0]); | |
1128 | riu = ioctx->rdma_ius; | |
1129 | ||
1130 | /* | |
1131 | * For each remote desc - calculate the #ib_sge. | |
1132 | * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then | |
1133 | * each remote desc rdma_iu is required a rdma wr; | |
1134 | * else | |
1135 | * we need to allocate extra rdma_iu to carry extra #ib_sge in | |
1136 | * another rdma wr | |
1137 | */ | |
1138 | for (i = 0, j = 0; | |
1139 | j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) { | |
1140 | rsize = be32_to_cpu(db->len); | |
1141 | raddr = be64_to_cpu(db->va); | |
1142 | riu->raddr = raddr; | |
1143 | riu->rkey = be32_to_cpu(db->key); | |
1144 | riu->sge_cnt = 0; | |
1145 | ||
1146 | /* calculate how many sge required for this remote_buf */ | |
1147 | while (rsize > 0 && tsize > 0) { | |
1148 | ||
1149 | if (rsize >= dma_len) { | |
1150 | tsize -= dma_len; | |
1151 | rsize -= dma_len; | |
1152 | raddr += dma_len; | |
1153 | ||
1154 | if (tsize > 0) { | |
1155 | ++j; | |
1156 | if (j < count) { | |
1157 | sg = sg_next(sg); | |
1158 | dma_len = sg_dma_len(sg); | |
1159 | } | |
1160 | } | |
1161 | } else { | |
1162 | tsize -= rsize; | |
1163 | dma_len -= rsize; | |
1164 | rsize = 0; | |
1165 | } | |
1166 | ||
1167 | ++riu->sge_cnt; | |
1168 | ||
1169 | if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) { | |
1170 | ++ioctx->n_rdma; | |
1171 | riu->sge = | |
1172 | kmalloc(riu->sge_cnt * sizeof *riu->sge, | |
1173 | GFP_KERNEL); | |
1174 | if (!riu->sge) | |
1175 | goto free_mem; | |
1176 | ||
1177 | ++riu; | |
1178 | riu->sge_cnt = 0; | |
1179 | riu->raddr = raddr; | |
1180 | riu->rkey = be32_to_cpu(db->key); | |
1181 | } | |
1182 | } | |
1183 | ||
1184 | ++ioctx->n_rdma; | |
1185 | riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge, | |
1186 | GFP_KERNEL); | |
1187 | if (!riu->sge) | |
1188 | goto free_mem; | |
1189 | } | |
1190 | ||
1191 | db = ioctx->rbufs; | |
1192 | tsize = cmd->data_length; | |
1193 | riu = ioctx->rdma_ius; | |
1194 | sg = sg_orig; | |
1195 | dma_len = sg_dma_len(&sg[0]); | |
1196 | dma_addr = sg_dma_address(&sg[0]); | |
1197 | ||
1198 | /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */ | |
1199 | for (i = 0, j = 0; | |
1200 | j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) { | |
1201 | rsize = be32_to_cpu(db->len); | |
1202 | sge = riu->sge; | |
1203 | k = 0; | |
1204 | ||
1205 | while (rsize > 0 && tsize > 0) { | |
1206 | sge->addr = dma_addr; | |
1207 | sge->lkey = ch->sport->sdev->mr->lkey; | |
1208 | ||
1209 | if (rsize >= dma_len) { | |
1210 | sge->length = | |
1211 | (tsize < dma_len) ? tsize : dma_len; | |
1212 | tsize -= dma_len; | |
1213 | rsize -= dma_len; | |
1214 | ||
1215 | if (tsize > 0) { | |
1216 | ++j; | |
1217 | if (j < count) { | |
1218 | sg = sg_next(sg); | |
1219 | dma_len = sg_dma_len(sg); | |
1220 | dma_addr = sg_dma_address(sg); | |
1221 | } | |
1222 | } | |
1223 | } else { | |
1224 | sge->length = (tsize < rsize) ? tsize : rsize; | |
1225 | tsize -= rsize; | |
1226 | dma_len -= rsize; | |
1227 | dma_addr += rsize; | |
1228 | rsize = 0; | |
1229 | } | |
1230 | ||
1231 | ++k; | |
1232 | if (k == riu->sge_cnt && rsize > 0 && tsize > 0) { | |
1233 | ++riu; | |
1234 | sge = riu->sge; | |
1235 | k = 0; | |
1236 | } else if (rsize > 0 && tsize > 0) | |
1237 | ++sge; | |
1238 | } | |
1239 | } | |
1240 | ||
1241 | return 0; | |
1242 | ||
1243 | free_mem: | |
1244 | srpt_unmap_sg_to_ib_sge(ch, ioctx); | |
1245 | ||
1246 | return -ENOMEM; | |
1247 | } | |
1248 | ||
1249 | /** | |
1250 | * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator. | |
1251 | */ | |
1252 | static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) | |
1253 | { | |
1254 | struct srpt_send_ioctx *ioctx; | |
1255 | unsigned long flags; | |
1256 | ||
1257 | BUG_ON(!ch); | |
1258 | ||
1259 | ioctx = NULL; | |
1260 | spin_lock_irqsave(&ch->spinlock, flags); | |
1261 | if (!list_empty(&ch->free_list)) { | |
1262 | ioctx = list_first_entry(&ch->free_list, | |
1263 | struct srpt_send_ioctx, free_list); | |
1264 | list_del(&ioctx->free_list); | |
1265 | } | |
1266 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
1267 | ||
1268 | if (!ioctx) | |
1269 | return ioctx; | |
1270 | ||
1271 | BUG_ON(ioctx->ch != ch); | |
a42d985b BVA |
1272 | spin_lock_init(&ioctx->spinlock); |
1273 | ioctx->state = SRPT_STATE_NEW; | |
1274 | ioctx->n_rbuf = 0; | |
1275 | ioctx->rbufs = NULL; | |
1276 | ioctx->n_rdma = 0; | |
1277 | ioctx->n_rdma_ius = 0; | |
1278 | ioctx->rdma_ius = NULL; | |
1279 | ioctx->mapped_sg_count = 0; | |
1280 | init_completion(&ioctx->tx_done); | |
1281 | ioctx->queue_status_only = false; | |
1282 | /* | |
1283 | * transport_init_se_cmd() does not initialize all fields, so do it | |
1284 | * here. | |
1285 | */ | |
1286 | memset(&ioctx->cmd, 0, sizeof(ioctx->cmd)); | |
1287 | memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data)); | |
1288 | ||
1289 | return ioctx; | |
1290 | } | |
1291 | ||
a42d985b BVA |
1292 | /** |
1293 | * srpt_abort_cmd() - Abort a SCSI command. | |
1294 | * @ioctx: I/O context associated with the SCSI command. | |
1295 | * @context: Preferred execution context. | |
1296 | */ | |
1297 | static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) | |
1298 | { | |
1299 | enum srpt_command_state state; | |
1300 | unsigned long flags; | |
1301 | ||
1302 | BUG_ON(!ioctx); | |
1303 | ||
1304 | /* | |
1305 | * If the command is in a state where the target core is waiting for | |
1306 | * the ib_srpt driver, change the state to the next state. Changing | |
1307 | * the state of the command from SRPT_STATE_NEED_DATA to | |
1308 | * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this | |
1309 | * function a second time. | |
1310 | */ | |
1311 | ||
1312 | spin_lock_irqsave(&ioctx->spinlock, flags); | |
1313 | state = ioctx->state; | |
1314 | switch (state) { | |
1315 | case SRPT_STATE_NEED_DATA: | |
1316 | ioctx->state = SRPT_STATE_DATA_IN; | |
1317 | break; | |
1318 | case SRPT_STATE_DATA_IN: | |
1319 | case SRPT_STATE_CMD_RSP_SENT: | |
1320 | case SRPT_STATE_MGMT_RSP_SENT: | |
1321 | ioctx->state = SRPT_STATE_DONE; | |
1322 | break; | |
1323 | default: | |
1324 | break; | |
1325 | } | |
1326 | spin_unlock_irqrestore(&ioctx->spinlock, flags); | |
1327 | ||
9474b043 NB |
1328 | if (state == SRPT_STATE_DONE) { |
1329 | struct srpt_rdma_ch *ch = ioctx->ch; | |
1330 | ||
1331 | BUG_ON(ch->sess == NULL); | |
1332 | ||
1333 | target_put_sess_cmd(ch->sess, &ioctx->cmd); | |
a42d985b | 1334 | goto out; |
9474b043 | 1335 | } |
a42d985b BVA |
1336 | |
1337 | pr_debug("Aborting cmd with state %d and tag %lld\n", state, | |
1338 | ioctx->tag); | |
1339 | ||
1340 | switch (state) { | |
1341 | case SRPT_STATE_NEW: | |
1342 | case SRPT_STATE_DATA_IN: | |
1343 | case SRPT_STATE_MGMT: | |
1344 | /* | |
1345 | * Do nothing - defer abort processing until | |
1346 | * srpt_queue_response() is invoked. | |
1347 | */ | |
1348 | WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false)); | |
1349 | break; | |
1350 | case SRPT_STATE_NEED_DATA: | |
1351 | /* DMA_TO_DEVICE (write) - RDMA read error. */ | |
e672a47f CH |
1352 | |
1353 | /* XXX(hch): this is a horrible layering violation.. */ | |
7d680f3b | 1354 | spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); |
e672a47f | 1355 | ioctx->cmd.transport_state &= ~CMD_T_ACTIVE; |
7d680f3b | 1356 | spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); |
a42d985b BVA |
1357 | break; |
1358 | case SRPT_STATE_CMD_RSP_SENT: | |
1359 | /* | |
1360 | * SRP_RSP sending failed or the SRP_RSP send completion has | |
1361 | * not been received in time. | |
1362 | */ | |
1363 | srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); | |
9474b043 | 1364 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); |
a42d985b BVA |
1365 | break; |
1366 | case SRPT_STATE_MGMT_RSP_SENT: | |
1367 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); | |
9474b043 | 1368 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); |
a42d985b BVA |
1369 | break; |
1370 | default: | |
532ec6f1 | 1371 | WARN(1, "Unexpected command state (%d)", state); |
a42d985b BVA |
1372 | break; |
1373 | } | |
1374 | ||
1375 | out: | |
1376 | return state; | |
1377 | } | |
1378 | ||
1379 | /** | |
1380 | * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion. | |
1381 | */ | |
1382 | static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) | |
1383 | { | |
1384 | struct srpt_send_ioctx *ioctx; | |
1385 | enum srpt_command_state state; | |
1386 | struct se_cmd *cmd; | |
1387 | u32 index; | |
1388 | ||
1389 | atomic_inc(&ch->sq_wr_avail); | |
1390 | ||
1391 | index = idx_from_wr_id(wr_id); | |
1392 | ioctx = ch->ioctx_ring[index]; | |
1393 | state = srpt_get_cmd_state(ioctx); | |
1394 | cmd = &ioctx->cmd; | |
1395 | ||
1396 | WARN_ON(state != SRPT_STATE_CMD_RSP_SENT | |
1397 | && state != SRPT_STATE_MGMT_RSP_SENT | |
1398 | && state != SRPT_STATE_NEED_DATA | |
1399 | && state != SRPT_STATE_DONE); | |
1400 | ||
1401 | /* If SRP_RSP sending failed, undo the ch->req_lim change. */ | |
1402 | if (state == SRPT_STATE_CMD_RSP_SENT | |
1403 | || state == SRPT_STATE_MGMT_RSP_SENT) | |
1404 | atomic_dec(&ch->req_lim); | |
1405 | ||
1406 | srpt_abort_cmd(ioctx); | |
1407 | } | |
1408 | ||
1409 | /** | |
1410 | * srpt_handle_send_comp() - Process an IB send completion notification. | |
1411 | */ | |
1412 | static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, | |
1413 | struct srpt_send_ioctx *ioctx) | |
1414 | { | |
1415 | enum srpt_command_state state; | |
1416 | ||
1417 | atomic_inc(&ch->sq_wr_avail); | |
1418 | ||
1419 | state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); | |
1420 | ||
1421 | if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT | |
1422 | && state != SRPT_STATE_MGMT_RSP_SENT | |
1423 | && state != SRPT_STATE_DONE)) | |
1424 | pr_debug("state = %d\n", state); | |
1425 | ||
9474b043 NB |
1426 | if (state != SRPT_STATE_DONE) { |
1427 | srpt_unmap_sg_to_ib_sge(ch, ioctx); | |
1428 | transport_generic_free_cmd(&ioctx->cmd, 0); | |
1429 | } else { | |
a42d985b BVA |
1430 | printk(KERN_ERR "IB completion has been received too late for" |
1431 | " wr_id = %u.\n", ioctx->ioctx.index); | |
9474b043 | 1432 | } |
a42d985b BVA |
1433 | } |
1434 | ||
1435 | /** | |
1436 | * srpt_handle_rdma_comp() - Process an IB RDMA completion notification. | |
1437 | * | |
e672a47f CH |
1438 | * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping |
1439 | * the data that has been transferred via IB RDMA had to be postponed until the | |
142ad5db | 1440 | * check_stop_free() callback. None of this is necessary anymore and needs to |
e672a47f | 1441 | * be cleaned up. |
a42d985b BVA |
1442 | */ |
1443 | static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, | |
1444 | struct srpt_send_ioctx *ioctx, | |
1445 | enum srpt_opcode opcode) | |
1446 | { | |
1447 | WARN_ON(ioctx->n_rdma <= 0); | |
1448 | atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); | |
1449 | ||
1450 | if (opcode == SRPT_RDMA_READ_LAST) { | |
1451 | if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA, | |
1452 | SRPT_STATE_DATA_IN)) | |
e672a47f | 1453 | target_execute_cmd(&ioctx->cmd); |
a42d985b BVA |
1454 | else |
1455 | printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__, | |
1456 | __LINE__, srpt_get_cmd_state(ioctx)); | |
1457 | } else if (opcode == SRPT_RDMA_ABORT) { | |
1458 | ioctx->rdma_aborted = true; | |
1459 | } else { | |
1460 | WARN(true, "unexpected opcode %d\n", opcode); | |
1461 | } | |
1462 | } | |
1463 | ||
1464 | /** | |
1465 | * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion. | |
1466 | */ | |
1467 | static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, | |
1468 | struct srpt_send_ioctx *ioctx, | |
1469 | enum srpt_opcode opcode) | |
1470 | { | |
1471 | struct se_cmd *cmd; | |
1472 | enum srpt_command_state state; | |
1473 | ||
1474 | cmd = &ioctx->cmd; | |
1475 | state = srpt_get_cmd_state(ioctx); | |
1476 | switch (opcode) { | |
1477 | case SRPT_RDMA_READ_LAST: | |
1478 | if (ioctx->n_rdma <= 0) { | |
1479 | printk(KERN_ERR "Received invalid RDMA read" | |
1480 | " error completion with idx %d\n", | |
1481 | ioctx->ioctx.index); | |
1482 | break; | |
1483 | } | |
1484 | atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); | |
1485 | if (state == SRPT_STATE_NEED_DATA) | |
1486 | srpt_abort_cmd(ioctx); | |
1487 | else | |
1488 | printk(KERN_ERR "%s[%d]: wrong state = %d\n", | |
1489 | __func__, __LINE__, state); | |
1490 | break; | |
1491 | case SRPT_RDMA_WRITE_LAST: | |
a42d985b BVA |
1492 | break; |
1493 | default: | |
1494 | printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__, | |
1495 | __LINE__, opcode); | |
1496 | break; | |
1497 | } | |
1498 | } | |
1499 | ||
1500 | /** | |
1501 | * srpt_build_cmd_rsp() - Build an SRP_RSP response. | |
1502 | * @ch: RDMA channel through which the request has been received. | |
1503 | * @ioctx: I/O context associated with the SRP_CMD request. The response will | |
1504 | * be built in the buffer ioctx->buf points at and hence this function will | |
1505 | * overwrite the request data. | |
1506 | * @tag: tag of the request for which this response is being generated. | |
1507 | * @status: value for the STATUS field of the SRP_RSP information unit. | |
1508 | * | |
1509 | * Returns the size in bytes of the SRP_RSP response. | |
1510 | * | |
1511 | * An SRP_RSP response contains a SCSI status or service response. See also | |
1512 | * section 6.9 in the SRP r16a document for the format of an SRP_RSP | |
1513 | * response. See also SPC-2 for more information about sense data. | |
1514 | */ | |
1515 | static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, | |
1516 | struct srpt_send_ioctx *ioctx, u64 tag, | |
1517 | int status) | |
1518 | { | |
1519 | struct srp_rsp *srp_rsp; | |
1520 | const u8 *sense_data; | |
1521 | int sense_data_len, max_sense_len; | |
1522 | ||
1523 | /* | |
1524 | * The lowest bit of all SAM-3 status codes is zero (see also | |
1525 | * paragraph 5.3 in SAM-3). | |
1526 | */ | |
1527 | WARN_ON(status & 1); | |
1528 | ||
1529 | srp_rsp = ioctx->ioctx.buf; | |
1530 | BUG_ON(!srp_rsp); | |
1531 | ||
1532 | sense_data = ioctx->sense_data; | |
1533 | sense_data_len = ioctx->cmd.scsi_sense_length; | |
1534 | WARN_ON(sense_data_len > sizeof(ioctx->sense_data)); | |
1535 | ||
1536 | memset(srp_rsp, 0, sizeof *srp_rsp); | |
1537 | srp_rsp->opcode = SRP_RSP; | |
1538 | srp_rsp->req_lim_delta = | |
1539 | __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); | |
1540 | srp_rsp->tag = tag; | |
1541 | srp_rsp->status = status; | |
1542 | ||
1543 | if (sense_data_len) { | |
1544 | BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); | |
1545 | max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); | |
1546 | if (sense_data_len > max_sense_len) { | |
1547 | printk(KERN_WARNING "truncated sense data from %d to %d" | |
1548 | " bytes\n", sense_data_len, max_sense_len); | |
1549 | sense_data_len = max_sense_len; | |
1550 | } | |
1551 | ||
1552 | srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID; | |
1553 | srp_rsp->sense_data_len = cpu_to_be32(sense_data_len); | |
1554 | memcpy(srp_rsp + 1, sense_data, sense_data_len); | |
1555 | } | |
1556 | ||
1557 | return sizeof(*srp_rsp) + sense_data_len; | |
1558 | } | |
1559 | ||
1560 | /** | |
1561 | * srpt_build_tskmgmt_rsp() - Build a task management response. | |
1562 | * @ch: RDMA channel through which the request has been received. | |
1563 | * @ioctx: I/O context in which the SRP_RSP response will be built. | |
1564 | * @rsp_code: RSP_CODE that will be stored in the response. | |
1565 | * @tag: Tag of the request for which this response is being generated. | |
1566 | * | |
1567 | * Returns the size in bytes of the SRP_RSP response. | |
1568 | * | |
1569 | * An SRP_RSP response contains a SCSI status or service response. See also | |
1570 | * section 6.9 in the SRP r16a document for the format of an SRP_RSP | |
1571 | * response. | |
1572 | */ | |
1573 | static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, | |
1574 | struct srpt_send_ioctx *ioctx, | |
1575 | u8 rsp_code, u64 tag) | |
1576 | { | |
1577 | struct srp_rsp *srp_rsp; | |
1578 | int resp_data_len; | |
1579 | int resp_len; | |
1580 | ||
c807f643 | 1581 | resp_data_len = 4; |
a42d985b BVA |
1582 | resp_len = sizeof(*srp_rsp) + resp_data_len; |
1583 | ||
1584 | srp_rsp = ioctx->ioctx.buf; | |
1585 | BUG_ON(!srp_rsp); | |
1586 | memset(srp_rsp, 0, sizeof *srp_rsp); | |
1587 | ||
1588 | srp_rsp->opcode = SRP_RSP; | |
1589 | srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 | |
1590 | + atomic_xchg(&ch->req_lim_delta, 0)); | |
1591 | srp_rsp->tag = tag; | |
1592 | ||
c807f643 JW |
1593 | srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; |
1594 | srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); | |
1595 | srp_rsp->data[3] = rsp_code; | |
a42d985b BVA |
1596 | |
1597 | return resp_len; | |
1598 | } | |
1599 | ||
1600 | #define NO_SUCH_LUN ((uint64_t)-1LL) | |
1601 | ||
1602 | /* | |
1603 | * SCSI LUN addressing method. See also SAM-2 and the section about | |
1604 | * eight byte LUNs. | |
1605 | */ | |
1606 | enum scsi_lun_addr_method { | |
1607 | SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0, | |
1608 | SCSI_LUN_ADDR_METHOD_FLAT = 1, | |
1609 | SCSI_LUN_ADDR_METHOD_LUN = 2, | |
1610 | SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3, | |
1611 | }; | |
1612 | ||
1613 | /* | |
1614 | * srpt_unpack_lun() - Convert from network LUN to linear LUN. | |
1615 | * | |
1616 | * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte | |
1617 | * order (big endian) to a linear LUN. Supports three LUN addressing methods: | |
1618 | * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40). | |
1619 | */ | |
1620 | static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) | |
1621 | { | |
1622 | uint64_t res = NO_SUCH_LUN; | |
1623 | int addressing_method; | |
1624 | ||
1625 | if (unlikely(len < 2)) { | |
1626 | printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or " | |
1627 | "more", len); | |
1628 | goto out; | |
1629 | } | |
1630 | ||
1631 | switch (len) { | |
1632 | case 8: | |
1633 | if ((*((__be64 *)lun) & | |
1634 | __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) | |
1635 | goto out_err; | |
1636 | break; | |
1637 | case 4: | |
1638 | if (*((__be16 *)&lun[2]) != 0) | |
1639 | goto out_err; | |
1640 | break; | |
1641 | case 6: | |
1642 | if (*((__be32 *)&lun[2]) != 0) | |
1643 | goto out_err; | |
1644 | break; | |
1645 | case 2: | |
1646 | break; | |
1647 | default: | |
1648 | goto out_err; | |
1649 | } | |
1650 | ||
1651 | addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */ | |
1652 | switch (addressing_method) { | |
1653 | case SCSI_LUN_ADDR_METHOD_PERIPHERAL: | |
1654 | case SCSI_LUN_ADDR_METHOD_FLAT: | |
1655 | case SCSI_LUN_ADDR_METHOD_LUN: | |
1656 | res = *(lun + 1) | (((*lun) & 0x3f) << 8); | |
1657 | break; | |
1658 | ||
1659 | case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN: | |
1660 | default: | |
1661 | printk(KERN_ERR "Unimplemented LUN addressing method %u", | |
1662 | addressing_method); | |
1663 | break; | |
1664 | } | |
1665 | ||
1666 | out: | |
1667 | return res; | |
1668 | ||
1669 | out_err: | |
1670 | printk(KERN_ERR "Support for multi-level LUNs has not yet been" | |
1671 | " implemented"); | |
1672 | goto out; | |
1673 | } | |
1674 | ||
1675 | static int srpt_check_stop_free(struct se_cmd *cmd) | |
1676 | { | |
9474b043 NB |
1677 | struct srpt_send_ioctx *ioctx = container_of(cmd, |
1678 | struct srpt_send_ioctx, cmd); | |
a42d985b | 1679 | |
9474b043 | 1680 | return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); |
a42d985b BVA |
1681 | } |
1682 | ||
1683 | /** | |
1684 | * srpt_handle_cmd() - Process SRP_CMD. | |
1685 | */ | |
1686 | static int srpt_handle_cmd(struct srpt_rdma_ch *ch, | |
1687 | struct srpt_recv_ioctx *recv_ioctx, | |
1688 | struct srpt_send_ioctx *send_ioctx) | |
1689 | { | |
1690 | struct se_cmd *cmd; | |
1691 | struct srp_cmd *srp_cmd; | |
1692 | uint64_t unpacked_lun; | |
1693 | u64 data_len; | |
1694 | enum dma_data_direction dir; | |
de103c93 | 1695 | sense_reason_t ret; |
9474b043 | 1696 | int rc; |
a42d985b BVA |
1697 | |
1698 | BUG_ON(!send_ioctx); | |
1699 | ||
1700 | srp_cmd = recv_ioctx->ioctx.buf; | |
a42d985b BVA |
1701 | cmd = &send_ioctx->cmd; |
1702 | send_ioctx->tag = srp_cmd->tag; | |
1703 | ||
1704 | switch (srp_cmd->task_attr) { | |
1705 | case SRP_CMD_SIMPLE_Q: | |
1706 | cmd->sam_task_attr = MSG_SIMPLE_TAG; | |
1707 | break; | |
1708 | case SRP_CMD_ORDERED_Q: | |
1709 | default: | |
1710 | cmd->sam_task_attr = MSG_ORDERED_TAG; | |
1711 | break; | |
1712 | case SRP_CMD_HEAD_OF_Q: | |
1713 | cmd->sam_task_attr = MSG_HEAD_TAG; | |
1714 | break; | |
1715 | case SRP_CMD_ACA: | |
1716 | cmd->sam_task_attr = MSG_ACA_TAG; | |
1717 | break; | |
1718 | } | |
1719 | ||
de103c93 | 1720 | if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) { |
a42d985b BVA |
1721 | printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n", |
1722 | srp_cmd->tag); | |
de103c93 | 1723 | ret = TCM_INVALID_CDB_FIELD; |
a42d985b BVA |
1724 | goto send_sense; |
1725 | } | |
1726 | ||
a42d985b BVA |
1727 | unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun, |
1728 | sizeof(srp_cmd->lun)); | |
9474b043 NB |
1729 | rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb, |
1730 | &send_ioctx->sense_data[0], unpacked_lun, data_len, | |
1731 | MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF); | |
1732 | if (rc != 0) { | |
1733 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
a42d985b | 1734 | goto send_sense; |
187e70a5 | 1735 | } |
a42d985b BVA |
1736 | return 0; |
1737 | ||
1738 | send_sense: | |
de103c93 | 1739 | transport_send_check_condition_and_sense(cmd, ret, 0); |
a42d985b BVA |
1740 | return -1; |
1741 | } | |
1742 | ||
1743 | /** | |
1744 | * srpt_rx_mgmt_fn_tag() - Process a task management function by tag. | |
1745 | * @ch: RDMA channel of the task management request. | |
1746 | * @fn: Task management function to perform. | |
1747 | * @req_tag: Tag of the SRP task management request. | |
1748 | * @mgmt_ioctx: I/O context of the task management request. | |
1749 | * | |
1750 | * Returns zero if the target core will process the task management | |
1751 | * request asynchronously. | |
1752 | * | |
1753 | * Note: It is assumed that the initiator serializes tag-based task management | |
1754 | * requests. | |
1755 | */ | |
1756 | static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag) | |
1757 | { | |
1758 | struct srpt_device *sdev; | |
1759 | struct srpt_rdma_ch *ch; | |
1760 | struct srpt_send_ioctx *target; | |
1761 | int ret, i; | |
1762 | ||
1763 | ret = -EINVAL; | |
1764 | ch = ioctx->ch; | |
1765 | BUG_ON(!ch); | |
1766 | BUG_ON(!ch->sport); | |
1767 | sdev = ch->sport->sdev; | |
1768 | BUG_ON(!sdev); | |
1769 | spin_lock_irq(&sdev->spinlock); | |
1770 | for (i = 0; i < ch->rq_size; ++i) { | |
1771 | target = ch->ioctx_ring[i]; | |
1772 | if (target->cmd.se_lun == ioctx->cmd.se_lun && | |
1773 | target->tag == tag && | |
1774 | srpt_get_cmd_state(target) != SRPT_STATE_DONE) { | |
1775 | ret = 0; | |
1776 | /* now let the target core abort &target->cmd; */ | |
1777 | break; | |
1778 | } | |
1779 | } | |
1780 | spin_unlock_irq(&sdev->spinlock); | |
1781 | return ret; | |
1782 | } | |
1783 | ||
1784 | static int srp_tmr_to_tcm(int fn) | |
1785 | { | |
1786 | switch (fn) { | |
1787 | case SRP_TSK_ABORT_TASK: | |
1788 | return TMR_ABORT_TASK; | |
1789 | case SRP_TSK_ABORT_TASK_SET: | |
1790 | return TMR_ABORT_TASK_SET; | |
1791 | case SRP_TSK_CLEAR_TASK_SET: | |
1792 | return TMR_CLEAR_TASK_SET; | |
1793 | case SRP_TSK_LUN_RESET: | |
1794 | return TMR_LUN_RESET; | |
1795 | case SRP_TSK_CLEAR_ACA: | |
1796 | return TMR_CLEAR_ACA; | |
1797 | default: | |
1798 | return -1; | |
1799 | } | |
1800 | } | |
1801 | ||
1802 | /** | |
1803 | * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit. | |
1804 | * | |
1805 | * Returns 0 if and only if the request will be processed by the target core. | |
1806 | * | |
1807 | * For more information about SRP_TSK_MGMT information units, see also section | |
1808 | * 6.7 in the SRP r16a document. | |
1809 | */ | |
1810 | static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, | |
1811 | struct srpt_recv_ioctx *recv_ioctx, | |
1812 | struct srpt_send_ioctx *send_ioctx) | |
1813 | { | |
1814 | struct srp_tsk_mgmt *srp_tsk; | |
1815 | struct se_cmd *cmd; | |
3e4f5748 | 1816 | struct se_session *sess = ch->sess; |
a42d985b | 1817 | uint64_t unpacked_lun; |
3e4f5748 | 1818 | uint32_t tag = 0; |
a42d985b | 1819 | int tcm_tmr; |
3e4f5748 | 1820 | int rc; |
a42d985b BVA |
1821 | |
1822 | BUG_ON(!send_ioctx); | |
1823 | ||
1824 | srp_tsk = recv_ioctx->ioctx.buf; | |
1825 | cmd = &send_ioctx->cmd; | |
1826 | ||
1827 | pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld" | |
1828 | " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func, | |
1829 | srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); | |
1830 | ||
1831 | srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); | |
1832 | send_ioctx->tag = srp_tsk->tag; | |
1833 | tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); | |
1834 | if (tcm_tmr < 0) { | |
a42d985b BVA |
1835 | send_ioctx->cmd.se_tmr_req->response = |
1836 | TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | |
de103c93 | 1837 | goto fail; |
a42d985b | 1838 | } |
a42d985b BVA |
1839 | unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun, |
1840 | sizeof(srp_tsk->lun)); | |
3e4f5748 NB |
1841 | |
1842 | if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) { | |
1843 | rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag); | |
1844 | if (rc < 0) { | |
1845 | send_ioctx->cmd.se_tmr_req->response = | |
1846 | TMR_TASK_DOES_NOT_EXIST; | |
1847 | goto fail; | |
1848 | } | |
1849 | tag = srp_tsk->task_tag; | |
1850 | } | |
1851 | rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun, | |
1852 | srp_tsk, tcm_tmr, GFP_KERNEL, tag, | |
1853 | TARGET_SCF_ACK_KREF); | |
1854 | if (rc != 0) { | |
1855 | send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED; | |
de103c93 | 1856 | goto fail; |
a42d985b | 1857 | } |
de103c93 CH |
1858 | return; |
1859 | fail: | |
de103c93 | 1860 | transport_send_check_condition_and_sense(cmd, 0, 0); // XXX: |
a42d985b BVA |
1861 | } |
1862 | ||
1863 | /** | |
1864 | * srpt_handle_new_iu() - Process a newly received information unit. | |
1865 | * @ch: RDMA channel through which the information unit has been received. | |
1866 | * @ioctx: SRPT I/O context associated with the information unit. | |
1867 | */ | |
1868 | static void srpt_handle_new_iu(struct srpt_rdma_ch *ch, | |
1869 | struct srpt_recv_ioctx *recv_ioctx, | |
1870 | struct srpt_send_ioctx *send_ioctx) | |
1871 | { | |
1872 | struct srp_cmd *srp_cmd; | |
1873 | enum rdma_ch_state ch_state; | |
1874 | ||
1875 | BUG_ON(!ch); | |
1876 | BUG_ON(!recv_ioctx); | |
1877 | ||
1878 | ib_dma_sync_single_for_cpu(ch->sport->sdev->device, | |
1879 | recv_ioctx->ioctx.dma, srp_max_req_size, | |
1880 | DMA_FROM_DEVICE); | |
1881 | ||
1882 | ch_state = srpt_get_ch_state(ch); | |
1883 | if (unlikely(ch_state == CH_CONNECTING)) { | |
1884 | list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list); | |
1885 | goto out; | |
1886 | } | |
1887 | ||
1888 | if (unlikely(ch_state != CH_LIVE)) | |
1889 | goto out; | |
1890 | ||
1891 | srp_cmd = recv_ioctx->ioctx.buf; | |
1892 | if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) { | |
1893 | if (!send_ioctx) | |
1894 | send_ioctx = srpt_get_send_ioctx(ch); | |
1895 | if (unlikely(!send_ioctx)) { | |
1896 | list_add_tail(&recv_ioctx->wait_list, | |
1897 | &ch->cmd_wait_list); | |
1898 | goto out; | |
1899 | } | |
1900 | } | |
1901 | ||
a42d985b BVA |
1902 | switch (srp_cmd->opcode) { |
1903 | case SRP_CMD: | |
1904 | srpt_handle_cmd(ch, recv_ioctx, send_ioctx); | |
1905 | break; | |
1906 | case SRP_TSK_MGMT: | |
1907 | srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx); | |
1908 | break; | |
1909 | case SRP_I_LOGOUT: | |
1910 | printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n"); | |
1911 | break; | |
1912 | case SRP_CRED_RSP: | |
1913 | pr_debug("received SRP_CRED_RSP\n"); | |
1914 | break; | |
1915 | case SRP_AER_RSP: | |
1916 | pr_debug("received SRP_AER_RSP\n"); | |
1917 | break; | |
1918 | case SRP_RSP: | |
1919 | printk(KERN_ERR "Received SRP_RSP\n"); | |
1920 | break; | |
1921 | default: | |
1922 | printk(KERN_ERR "received IU with unknown opcode 0x%x\n", | |
1923 | srp_cmd->opcode); | |
1924 | break; | |
1925 | } | |
1926 | ||
1927 | srpt_post_recv(ch->sport->sdev, recv_ioctx); | |
1928 | out: | |
1929 | return; | |
1930 | } | |
1931 | ||
1932 | static void srpt_process_rcv_completion(struct ib_cq *cq, | |
1933 | struct srpt_rdma_ch *ch, | |
1934 | struct ib_wc *wc) | |
1935 | { | |
1936 | struct srpt_device *sdev = ch->sport->sdev; | |
1937 | struct srpt_recv_ioctx *ioctx; | |
1938 | u32 index; | |
1939 | ||
1940 | index = idx_from_wr_id(wc->wr_id); | |
1941 | if (wc->status == IB_WC_SUCCESS) { | |
1942 | int req_lim; | |
1943 | ||
1944 | req_lim = atomic_dec_return(&ch->req_lim); | |
1945 | if (unlikely(req_lim < 0)) | |
1946 | printk(KERN_ERR "req_lim = %d < 0\n", req_lim); | |
1947 | ioctx = sdev->ioctx_ring[index]; | |
1948 | srpt_handle_new_iu(ch, ioctx, NULL); | |
1949 | } else { | |
1950 | printk(KERN_INFO "receiving failed for idx %u with status %d\n", | |
1951 | index, wc->status); | |
1952 | } | |
1953 | } | |
1954 | ||
1955 | /** | |
1956 | * srpt_process_send_completion() - Process an IB send completion. | |
1957 | * | |
1958 | * Note: Although this has not yet been observed during tests, at least in | |
1959 | * theory it is possible that the srpt_get_send_ioctx() call invoked by | |
1960 | * srpt_handle_new_iu() fails. This is possible because the req_lim_delta | |
1961 | * value in each response is set to one, and it is possible that this response | |
1962 | * makes the initiator send a new request before the send completion for that | |
1963 | * response has been processed. This could e.g. happen if the call to | |
1964 | * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or | |
1965 | * if IB retransmission causes generation of the send completion to be | |
1966 | * delayed. Incoming information units for which srpt_get_send_ioctx() fails | |
1967 | * are queued on cmd_wait_list. The code below processes these delayed | |
1968 | * requests one at a time. | |
1969 | */ | |
1970 | static void srpt_process_send_completion(struct ib_cq *cq, | |
1971 | struct srpt_rdma_ch *ch, | |
1972 | struct ib_wc *wc) | |
1973 | { | |
1974 | struct srpt_send_ioctx *send_ioctx; | |
1975 | uint32_t index; | |
1976 | enum srpt_opcode opcode; | |
1977 | ||
1978 | index = idx_from_wr_id(wc->wr_id); | |
1979 | opcode = opcode_from_wr_id(wc->wr_id); | |
1980 | send_ioctx = ch->ioctx_ring[index]; | |
1981 | if (wc->status == IB_WC_SUCCESS) { | |
1982 | if (opcode == SRPT_SEND) | |
1983 | srpt_handle_send_comp(ch, send_ioctx); | |
1984 | else { | |
1985 | WARN_ON(opcode != SRPT_RDMA_ABORT && | |
1986 | wc->opcode != IB_WC_RDMA_READ); | |
1987 | srpt_handle_rdma_comp(ch, send_ioctx, opcode); | |
1988 | } | |
1989 | } else { | |
1990 | if (opcode == SRPT_SEND) { | |
1991 | printk(KERN_INFO "sending response for idx %u failed" | |
1992 | " with status %d\n", index, wc->status); | |
1993 | srpt_handle_send_err_comp(ch, wc->wr_id); | |
1994 | } else if (opcode != SRPT_RDMA_MID) { | |
1995 | printk(KERN_INFO "RDMA t %d for idx %u failed with" | |
1996 | " status %d", opcode, index, wc->status); | |
1997 | srpt_handle_rdma_err_comp(ch, send_ioctx, opcode); | |
1998 | } | |
1999 | } | |
2000 | ||
2001 | while (unlikely(opcode == SRPT_SEND | |
2002 | && !list_empty(&ch->cmd_wait_list) | |
2003 | && srpt_get_ch_state(ch) == CH_LIVE | |
2004 | && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) { | |
2005 | struct srpt_recv_ioctx *recv_ioctx; | |
2006 | ||
2007 | recv_ioctx = list_first_entry(&ch->cmd_wait_list, | |
2008 | struct srpt_recv_ioctx, | |
2009 | wait_list); | |
2010 | list_del(&recv_ioctx->wait_list); | |
2011 | srpt_handle_new_iu(ch, recv_ioctx, send_ioctx); | |
2012 | } | |
2013 | } | |
2014 | ||
2015 | static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch) | |
2016 | { | |
2017 | struct ib_wc *const wc = ch->wc; | |
2018 | int i, n; | |
2019 | ||
2020 | WARN_ON(cq != ch->cq); | |
2021 | ||
2022 | ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); | |
2023 | while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) { | |
2024 | for (i = 0; i < n; i++) { | |
2025 | if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV) | |
2026 | srpt_process_rcv_completion(cq, ch, &wc[i]); | |
2027 | else | |
2028 | srpt_process_send_completion(cq, ch, &wc[i]); | |
2029 | } | |
2030 | } | |
2031 | } | |
2032 | ||
2033 | /** | |
2034 | * srpt_completion() - IB completion queue callback function. | |
2035 | * | |
2036 | * Notes: | |
2037 | * - It is guaranteed that a completion handler will never be invoked | |
2038 | * concurrently on two different CPUs for the same completion queue. See also | |
2039 | * Documentation/infiniband/core_locking.txt and the implementation of | |
2040 | * handle_edge_irq() in kernel/irq/chip.c. | |
2041 | * - When threaded IRQs are enabled, completion handlers are invoked in thread | |
2042 | * context instead of interrupt context. | |
2043 | */ | |
2044 | static void srpt_completion(struct ib_cq *cq, void *ctx) | |
2045 | { | |
2046 | struct srpt_rdma_ch *ch = ctx; | |
2047 | ||
2048 | wake_up_interruptible(&ch->wait_queue); | |
2049 | } | |
2050 | ||
2051 | static int srpt_compl_thread(void *arg) | |
2052 | { | |
2053 | struct srpt_rdma_ch *ch; | |
2054 | ||
2055 | /* Hibernation / freezing of the SRPT kernel thread is not supported. */ | |
2056 | current->flags |= PF_NOFREEZE; | |
2057 | ||
2058 | ch = arg; | |
2059 | BUG_ON(!ch); | |
2060 | printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n", | |
2061 | ch->sess_name, ch->thread->comm, current->pid); | |
2062 | while (!kthread_should_stop()) { | |
2063 | wait_event_interruptible(ch->wait_queue, | |
2064 | (srpt_process_completion(ch->cq, ch), | |
2065 | kthread_should_stop())); | |
2066 | } | |
2067 | printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n", | |
2068 | ch->sess_name, ch->thread->comm, current->pid); | |
2069 | return 0; | |
2070 | } | |
2071 | ||
2072 | /** | |
2073 | * srpt_create_ch_ib() - Create receive and send completion queues. | |
2074 | */ | |
2075 | static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) | |
2076 | { | |
2077 | struct ib_qp_init_attr *qp_init; | |
2078 | struct srpt_port *sport = ch->sport; | |
2079 | struct srpt_device *sdev = sport->sdev; | |
2080 | u32 srp_sq_size = sport->port_attrib.srp_sq_size; | |
2081 | int ret; | |
2082 | ||
2083 | WARN_ON(ch->rq_size < 1); | |
2084 | ||
2085 | ret = -ENOMEM; | |
2086 | qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL); | |
2087 | if (!qp_init) | |
2088 | goto out; | |
2089 | ||
2090 | ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, | |
2091 | ch->rq_size + srp_sq_size, 0); | |
2092 | if (IS_ERR(ch->cq)) { | |
2093 | ret = PTR_ERR(ch->cq); | |
2094 | printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n", | |
2095 | ch->rq_size + srp_sq_size, ret); | |
2096 | goto out; | |
2097 | } | |
2098 | ||
2099 | qp_init->qp_context = (void *)ch; | |
2100 | qp_init->event_handler | |
2101 | = (void(*)(struct ib_event *, void*))srpt_qp_event; | |
2102 | qp_init->send_cq = ch->cq; | |
2103 | qp_init->recv_cq = ch->cq; | |
2104 | qp_init->srq = sdev->srq; | |
2105 | qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; | |
2106 | qp_init->qp_type = IB_QPT_RC; | |
2107 | qp_init->cap.max_send_wr = srp_sq_size; | |
2108 | qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; | |
2109 | ||
2110 | ch->qp = ib_create_qp(sdev->pd, qp_init); | |
2111 | if (IS_ERR(ch->qp)) { | |
2112 | ret = PTR_ERR(ch->qp); | |
2113 | printk(KERN_ERR "failed to create_qp ret= %d\n", ret); | |
2114 | goto err_destroy_cq; | |
2115 | } | |
2116 | ||
2117 | atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr); | |
2118 | ||
2119 | pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", | |
2120 | __func__, ch->cq->cqe, qp_init->cap.max_send_sge, | |
2121 | qp_init->cap.max_send_wr, ch->cm_id); | |
2122 | ||
2123 | ret = srpt_init_ch_qp(ch, ch->qp); | |
2124 | if (ret) | |
2125 | goto err_destroy_qp; | |
2126 | ||
2127 | init_waitqueue_head(&ch->wait_queue); | |
2128 | ||
2129 | pr_debug("creating thread for session %s\n", ch->sess_name); | |
2130 | ||
2131 | ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl"); | |
2132 | if (IS_ERR(ch->thread)) { | |
2133 | printk(KERN_ERR "failed to create kernel thread %ld\n", | |
2134 | PTR_ERR(ch->thread)); | |
2135 | ch->thread = NULL; | |
2136 | goto err_destroy_qp; | |
2137 | } | |
2138 | ||
2139 | out: | |
2140 | kfree(qp_init); | |
2141 | return ret; | |
2142 | ||
2143 | err_destroy_qp: | |
2144 | ib_destroy_qp(ch->qp); | |
2145 | err_destroy_cq: | |
2146 | ib_destroy_cq(ch->cq); | |
2147 | goto out; | |
2148 | } | |
2149 | ||
2150 | static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) | |
2151 | { | |
2152 | if (ch->thread) | |
2153 | kthread_stop(ch->thread); | |
2154 | ||
2155 | ib_destroy_qp(ch->qp); | |
2156 | ib_destroy_cq(ch->cq); | |
2157 | } | |
2158 | ||
2159 | /** | |
2160 | * __srpt_close_ch() - Close an RDMA channel by setting the QP error state. | |
2161 | * | |
2162 | * Reset the QP and make sure all resources associated with the channel will | |
2163 | * be deallocated at an appropriate time. | |
2164 | * | |
2165 | * Note: The caller must hold ch->sport->sdev->spinlock. | |
2166 | */ | |
2167 | static void __srpt_close_ch(struct srpt_rdma_ch *ch) | |
2168 | { | |
2169 | struct srpt_device *sdev; | |
2170 | enum rdma_ch_state prev_state; | |
2171 | unsigned long flags; | |
2172 | ||
2173 | sdev = ch->sport->sdev; | |
2174 | ||
2175 | spin_lock_irqsave(&ch->spinlock, flags); | |
2176 | prev_state = ch->state; | |
2177 | switch (prev_state) { | |
2178 | case CH_CONNECTING: | |
2179 | case CH_LIVE: | |
2180 | ch->state = CH_DISCONNECTING; | |
2181 | break; | |
2182 | default: | |
2183 | break; | |
2184 | } | |
2185 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
2186 | ||
2187 | switch (prev_state) { | |
2188 | case CH_CONNECTING: | |
2189 | ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, | |
2190 | NULL, 0); | |
2191 | /* fall through */ | |
2192 | case CH_LIVE: | |
2193 | if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) | |
2194 | printk(KERN_ERR "sending CM DREQ failed.\n"); | |
2195 | break; | |
2196 | case CH_DISCONNECTING: | |
2197 | break; | |
2198 | case CH_DRAINING: | |
2199 | case CH_RELEASING: | |
2200 | break; | |
2201 | } | |
2202 | } | |
2203 | ||
2204 | /** | |
2205 | * srpt_close_ch() - Close an RDMA channel. | |
2206 | */ | |
2207 | static void srpt_close_ch(struct srpt_rdma_ch *ch) | |
2208 | { | |
2209 | struct srpt_device *sdev; | |
2210 | ||
2211 | sdev = ch->sport->sdev; | |
2212 | spin_lock_irq(&sdev->spinlock); | |
2213 | __srpt_close_ch(ch); | |
2214 | spin_unlock_irq(&sdev->spinlock); | |
2215 | } | |
2216 | ||
1d19f780 NB |
2217 | /** |
2218 | * srpt_shutdown_session() - Whether or not a session may be shut down. | |
2219 | */ | |
2220 | static int srpt_shutdown_session(struct se_session *se_sess) | |
2221 | { | |
2222 | struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; | |
2223 | unsigned long flags; | |
2224 | ||
2225 | spin_lock_irqsave(&ch->spinlock, flags); | |
2226 | if (ch->in_shutdown) { | |
2227 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
2228 | return true; | |
2229 | } | |
2230 | ||
2231 | ch->in_shutdown = true; | |
2232 | target_sess_cmd_list_set_waiting(se_sess); | |
2233 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
2234 | ||
2235 | return true; | |
2236 | } | |
2237 | ||
a42d985b BVA |
2238 | /** |
2239 | * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. | |
2240 | * @cm_id: Pointer to the CM ID of the channel to be drained. | |
2241 | * | |
2242 | * Note: Must be called from inside srpt_cm_handler to avoid a race between | |
2243 | * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one() | |
2244 | * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one() | |
2245 | * waits until all target sessions for the associated IB device have been | |
2246 | * unregistered and target session registration involves a call to | |
2247 | * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until | |
2248 | * this function has finished). | |
2249 | */ | |
2250 | static void srpt_drain_channel(struct ib_cm_id *cm_id) | |
2251 | { | |
2252 | struct srpt_device *sdev; | |
2253 | struct srpt_rdma_ch *ch; | |
2254 | int ret; | |
2255 | bool do_reset = false; | |
2256 | ||
2257 | WARN_ON_ONCE(irqs_disabled()); | |
2258 | ||
2259 | sdev = cm_id->context; | |
2260 | BUG_ON(!sdev); | |
2261 | spin_lock_irq(&sdev->spinlock); | |
2262 | list_for_each_entry(ch, &sdev->rch_list, list) { | |
2263 | if (ch->cm_id == cm_id) { | |
2264 | do_reset = srpt_test_and_set_ch_state(ch, | |
2265 | CH_CONNECTING, CH_DRAINING) || | |
2266 | srpt_test_and_set_ch_state(ch, | |
2267 | CH_LIVE, CH_DRAINING) || | |
2268 | srpt_test_and_set_ch_state(ch, | |
2269 | CH_DISCONNECTING, CH_DRAINING); | |
2270 | break; | |
2271 | } | |
2272 | } | |
2273 | spin_unlock_irq(&sdev->spinlock); | |
2274 | ||
2275 | if (do_reset) { | |
1d19f780 NB |
2276 | if (ch->sess) |
2277 | srpt_shutdown_session(ch->sess); | |
2278 | ||
a42d985b BVA |
2279 | ret = srpt_ch_qp_err(ch); |
2280 | if (ret < 0) | |
2281 | printk(KERN_ERR "Setting queue pair in error state" | |
2282 | " failed: %d\n", ret); | |
2283 | } | |
2284 | } | |
2285 | ||
2286 | /** | |
2287 | * srpt_find_channel() - Look up an RDMA channel. | |
2288 | * @cm_id: Pointer to the CM ID of the channel to be looked up. | |
2289 | * | |
2290 | * Return NULL if no matching RDMA channel has been found. | |
2291 | */ | |
2292 | static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev, | |
2293 | struct ib_cm_id *cm_id) | |
2294 | { | |
2295 | struct srpt_rdma_ch *ch; | |
2296 | bool found; | |
2297 | ||
2298 | WARN_ON_ONCE(irqs_disabled()); | |
2299 | BUG_ON(!sdev); | |
2300 | ||
2301 | found = false; | |
2302 | spin_lock_irq(&sdev->spinlock); | |
2303 | list_for_each_entry(ch, &sdev->rch_list, list) { | |
2304 | if (ch->cm_id == cm_id) { | |
2305 | found = true; | |
2306 | break; | |
2307 | } | |
2308 | } | |
2309 | spin_unlock_irq(&sdev->spinlock); | |
2310 | ||
2311 | return found ? ch : NULL; | |
2312 | } | |
2313 | ||
2314 | /** | |
2315 | * srpt_release_channel() - Release channel resources. | |
2316 | * | |
2317 | * Schedules the actual release because: | |
2318 | * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would | |
2319 | * trigger a deadlock. | |
2320 | * - It is not safe to call TCM transport_* functions from interrupt context. | |
2321 | */ | |
2322 | static void srpt_release_channel(struct srpt_rdma_ch *ch) | |
2323 | { | |
2324 | schedule_work(&ch->release_work); | |
2325 | } | |
2326 | ||
2327 | static void srpt_release_channel_work(struct work_struct *w) | |
2328 | { | |
2329 | struct srpt_rdma_ch *ch; | |
2330 | struct srpt_device *sdev; | |
9474b043 | 2331 | struct se_session *se_sess; |
a42d985b BVA |
2332 | |
2333 | ch = container_of(w, struct srpt_rdma_ch, release_work); | |
2334 | pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess, | |
2335 | ch->release_done); | |
2336 | ||
2337 | sdev = ch->sport->sdev; | |
2338 | BUG_ON(!sdev); | |
2339 | ||
9474b043 NB |
2340 | se_sess = ch->sess; |
2341 | BUG_ON(!se_sess); | |
2342 | ||
be646c2d | 2343 | target_wait_for_sess_cmds(se_sess); |
9474b043 NB |
2344 | |
2345 | transport_deregister_session_configfs(se_sess); | |
2346 | transport_deregister_session(se_sess); | |
a42d985b BVA |
2347 | ch->sess = NULL; |
2348 | ||
0b41d6ca NB |
2349 | ib_destroy_cm_id(ch->cm_id); |
2350 | ||
a42d985b BVA |
2351 | srpt_destroy_ch_ib(ch); |
2352 | ||
2353 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, | |
2354 | ch->sport->sdev, ch->rq_size, | |
2355 | ch->rsp_size, DMA_TO_DEVICE); | |
2356 | ||
2357 | spin_lock_irq(&sdev->spinlock); | |
2358 | list_del(&ch->list); | |
2359 | spin_unlock_irq(&sdev->spinlock); | |
2360 | ||
a42d985b BVA |
2361 | if (ch->release_done) |
2362 | complete(ch->release_done); | |
2363 | ||
2364 | wake_up(&sdev->ch_releaseQ); | |
2365 | ||
2366 | kfree(ch); | |
2367 | } | |
2368 | ||
2369 | static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport, | |
2370 | u8 i_port_id[16]) | |
2371 | { | |
2372 | struct srpt_node_acl *nacl; | |
2373 | ||
2374 | list_for_each_entry(nacl, &sport->port_acl_list, list) | |
2375 | if (memcmp(nacl->i_port_id, i_port_id, | |
2376 | sizeof(nacl->i_port_id)) == 0) | |
2377 | return nacl; | |
2378 | ||
2379 | return NULL; | |
2380 | } | |
2381 | ||
2382 | static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport, | |
2383 | u8 i_port_id[16]) | |
2384 | { | |
2385 | struct srpt_node_acl *nacl; | |
2386 | ||
2387 | spin_lock_irq(&sport->port_acl_lock); | |
2388 | nacl = __srpt_lookup_acl(sport, i_port_id); | |
2389 | spin_unlock_irq(&sport->port_acl_lock); | |
2390 | ||
2391 | return nacl; | |
2392 | } | |
2393 | ||
2394 | /** | |
2395 | * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED. | |
2396 | * | |
2397 | * Ownership of the cm_id is transferred to the target session if this | |
2398 | * functions returns zero. Otherwise the caller remains the owner of cm_id. | |
2399 | */ | |
2400 | static int srpt_cm_req_recv(struct ib_cm_id *cm_id, | |
2401 | struct ib_cm_req_event_param *param, | |
2402 | void *private_data) | |
2403 | { | |
2404 | struct srpt_device *sdev = cm_id->context; | |
2405 | struct srpt_port *sport = &sdev->port[param->port - 1]; | |
2406 | struct srp_login_req *req; | |
2407 | struct srp_login_rsp *rsp; | |
2408 | struct srp_login_rej *rej; | |
2409 | struct ib_cm_rep_param *rep_param; | |
2410 | struct srpt_rdma_ch *ch, *tmp_ch; | |
2411 | struct srpt_node_acl *nacl; | |
2412 | u32 it_iu_len; | |
2413 | int i; | |
2414 | int ret = 0; | |
2415 | ||
2416 | WARN_ON_ONCE(irqs_disabled()); | |
2417 | ||
2418 | if (WARN_ON(!sdev || !private_data)) | |
2419 | return -EINVAL; | |
2420 | ||
2421 | req = (struct srp_login_req *)private_data; | |
2422 | ||
2423 | it_iu_len = be32_to_cpu(req->req_it_iu_len); | |
2424 | ||
2425 | printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," | |
2426 | " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" | |
2427 | " (guid=0x%llx:0x%llx)\n", | |
2428 | be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), | |
2429 | be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), | |
2430 | be64_to_cpu(*(__be64 *)&req->target_port_id[0]), | |
2431 | be64_to_cpu(*(__be64 *)&req->target_port_id[8]), | |
2432 | it_iu_len, | |
2433 | param->port, | |
2434 | be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), | |
2435 | be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); | |
2436 | ||
2437 | rsp = kzalloc(sizeof *rsp, GFP_KERNEL); | |
2438 | rej = kzalloc(sizeof *rej, GFP_KERNEL); | |
2439 | rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL); | |
2440 | ||
2441 | if (!rsp || !rej || !rep_param) { | |
2442 | ret = -ENOMEM; | |
2443 | goto out; | |
2444 | } | |
2445 | ||
2446 | if (it_iu_len > srp_max_req_size || it_iu_len < 64) { | |
2447 | rej->reason = __constant_cpu_to_be32( | |
2448 | SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); | |
2449 | ret = -EINVAL; | |
2450 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because its" | |
2451 | " length (%d bytes) is out of range (%d .. %d)\n", | |
2452 | it_iu_len, 64, srp_max_req_size); | |
2453 | goto reject; | |
2454 | } | |
2455 | ||
2456 | if (!sport->enabled) { | |
2457 | rej->reason = __constant_cpu_to_be32( | |
2458 | SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); | |
2459 | ret = -EINVAL; | |
2460 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port" | |
2461 | " has not yet been enabled\n"); | |
2462 | goto reject; | |
2463 | } | |
2464 | ||
2465 | if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) { | |
2466 | rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN; | |
2467 | ||
2468 | spin_lock_irq(&sdev->spinlock); | |
2469 | ||
2470 | list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) { | |
2471 | if (!memcmp(ch->i_port_id, req->initiator_port_id, 16) | |
2472 | && !memcmp(ch->t_port_id, req->target_port_id, 16) | |
2473 | && param->port == ch->sport->port | |
2474 | && param->listen_id == ch->sport->sdev->cm_id | |
2475 | && ch->cm_id) { | |
2476 | enum rdma_ch_state ch_state; | |
2477 | ||
2478 | ch_state = srpt_get_ch_state(ch); | |
2479 | if (ch_state != CH_CONNECTING | |
2480 | && ch_state != CH_LIVE) | |
2481 | continue; | |
2482 | ||
2483 | /* found an existing channel */ | |
2484 | pr_debug("Found existing channel %s" | |
2485 | " cm_id= %p state= %d\n", | |
2486 | ch->sess_name, ch->cm_id, ch_state); | |
2487 | ||
2488 | __srpt_close_ch(ch); | |
2489 | ||
2490 | rsp->rsp_flags = | |
2491 | SRP_LOGIN_RSP_MULTICHAN_TERMINATED; | |
2492 | } | |
2493 | } | |
2494 | ||
2495 | spin_unlock_irq(&sdev->spinlock); | |
2496 | ||
2497 | } else | |
2498 | rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED; | |
2499 | ||
2500 | if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) | |
2501 | || *(__be64 *)(req->target_port_id + 8) != | |
2502 | cpu_to_be64(srpt_service_guid)) { | |
2503 | rej->reason = __constant_cpu_to_be32( | |
2504 | SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); | |
2505 | ret = -ENOMEM; | |
2506 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because it" | |
2507 | " has an invalid target port identifier.\n"); | |
2508 | goto reject; | |
2509 | } | |
2510 | ||
2511 | ch = kzalloc(sizeof *ch, GFP_KERNEL); | |
2512 | if (!ch) { | |
2513 | rej->reason = __constant_cpu_to_be32( | |
2514 | SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); | |
2515 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n"); | |
2516 | ret = -ENOMEM; | |
2517 | goto reject; | |
2518 | } | |
2519 | ||
2520 | INIT_WORK(&ch->release_work, srpt_release_channel_work); | |
2521 | memcpy(ch->i_port_id, req->initiator_port_id, 16); | |
2522 | memcpy(ch->t_port_id, req->target_port_id, 16); | |
2523 | ch->sport = &sdev->port[param->port - 1]; | |
2524 | ch->cm_id = cm_id; | |
2525 | /* | |
2526 | * Avoid QUEUE_FULL conditions by limiting the number of buffers used | |
2527 | * for the SRP protocol to the command queue size. | |
2528 | */ | |
2529 | ch->rq_size = SRPT_RQ_SIZE; | |
2530 | spin_lock_init(&ch->spinlock); | |
2531 | ch->state = CH_CONNECTING; | |
2532 | INIT_LIST_HEAD(&ch->cmd_wait_list); | |
2533 | ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size; | |
2534 | ||
2535 | ch->ioctx_ring = (struct srpt_send_ioctx **) | |
2536 | srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, | |
2537 | sizeof(*ch->ioctx_ring[0]), | |
2538 | ch->rsp_size, DMA_TO_DEVICE); | |
2539 | if (!ch->ioctx_ring) | |
2540 | goto free_ch; | |
2541 | ||
2542 | INIT_LIST_HEAD(&ch->free_list); | |
2543 | for (i = 0; i < ch->rq_size; i++) { | |
2544 | ch->ioctx_ring[i]->ch = ch; | |
2545 | list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); | |
2546 | } | |
2547 | ||
2548 | ret = srpt_create_ch_ib(ch); | |
2549 | if (ret) { | |
2550 | rej->reason = __constant_cpu_to_be32( | |
2551 | SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); | |
2552 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating" | |
2553 | " a new RDMA channel failed.\n"); | |
2554 | goto free_ring; | |
2555 | } | |
2556 | ||
2557 | ret = srpt_ch_qp_rtr(ch, ch->qp); | |
2558 | if (ret) { | |
2559 | rej->reason = __constant_cpu_to_be32( | |
2560 | SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); | |
2561 | printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling" | |
2562 | " RTR failed (error code = %d)\n", ret); | |
2563 | goto destroy_ib; | |
2564 | } | |
2565 | /* | |
2566 | * Use the initator port identifier as the session name. | |
2567 | */ | |
2568 | snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx", | |
2569 | be64_to_cpu(*(__be64 *)ch->i_port_id), | |
2570 | be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); | |
2571 | ||
2572 | pr_debug("registering session %s\n", ch->sess_name); | |
2573 | ||
2574 | nacl = srpt_lookup_acl(sport, ch->i_port_id); | |
2575 | if (!nacl) { | |
2576 | printk(KERN_INFO "Rejected login because no ACL has been" | |
2577 | " configured yet for initiator %s.\n", ch->sess_name); | |
2578 | rej->reason = __constant_cpu_to_be32( | |
2579 | SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); | |
2580 | goto destroy_ib; | |
2581 | } | |
2582 | ||
2583 | ch->sess = transport_init_session(); | |
3af33637 | 2584 | if (IS_ERR(ch->sess)) { |
a42d985b BVA |
2585 | rej->reason = __constant_cpu_to_be32( |
2586 | SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); | |
2587 | pr_debug("Failed to create session\n"); | |
2588 | goto deregister_session; | |
2589 | } | |
2590 | ch->sess->se_node_acl = &nacl->nacl; | |
2591 | transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch); | |
2592 | ||
2593 | pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess, | |
2594 | ch->sess_name, ch->cm_id); | |
2595 | ||
2596 | /* create srp_login_response */ | |
2597 | rsp->opcode = SRP_LOGIN_RSP; | |
2598 | rsp->tag = req->tag; | |
2599 | rsp->max_it_iu_len = req->req_it_iu_len; | |
2600 | rsp->max_ti_iu_len = req->req_it_iu_len; | |
2601 | ch->max_ti_iu_len = it_iu_len; | |
2602 | rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT | |
2603 | | SRP_BUF_FORMAT_INDIRECT); | |
2604 | rsp->req_lim_delta = cpu_to_be32(ch->rq_size); | |
2605 | atomic_set(&ch->req_lim, ch->rq_size); | |
2606 | atomic_set(&ch->req_lim_delta, 0); | |
2607 | ||
2608 | /* create cm reply */ | |
2609 | rep_param->qp_num = ch->qp->qp_num; | |
2610 | rep_param->private_data = (void *)rsp; | |
2611 | rep_param->private_data_len = sizeof *rsp; | |
2612 | rep_param->rnr_retry_count = 7; | |
2613 | rep_param->flow_control = 1; | |
2614 | rep_param->failover_accepted = 0; | |
2615 | rep_param->srq = 1; | |
2616 | rep_param->responder_resources = 4; | |
2617 | rep_param->initiator_depth = 4; | |
2618 | ||
2619 | ret = ib_send_cm_rep(cm_id, rep_param); | |
2620 | if (ret) { | |
2621 | printk(KERN_ERR "sending SRP_LOGIN_REQ response failed" | |
2622 | " (error code = %d)\n", ret); | |
2623 | goto release_channel; | |
2624 | } | |
2625 | ||
2626 | spin_lock_irq(&sdev->spinlock); | |
2627 | list_add_tail(&ch->list, &sdev->rch_list); | |
2628 | spin_unlock_irq(&sdev->spinlock); | |
2629 | ||
2630 | goto out; | |
2631 | ||
2632 | release_channel: | |
2633 | srpt_set_ch_state(ch, CH_RELEASING); | |
2634 | transport_deregister_session_configfs(ch->sess); | |
2635 | ||
2636 | deregister_session: | |
2637 | transport_deregister_session(ch->sess); | |
2638 | ch->sess = NULL; | |
2639 | ||
2640 | destroy_ib: | |
2641 | srpt_destroy_ch_ib(ch); | |
2642 | ||
2643 | free_ring: | |
2644 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, | |
2645 | ch->sport->sdev, ch->rq_size, | |
2646 | ch->rsp_size, DMA_TO_DEVICE); | |
2647 | free_ch: | |
2648 | kfree(ch); | |
2649 | ||
2650 | reject: | |
2651 | rej->opcode = SRP_LOGIN_REJ; | |
2652 | rej->tag = req->tag; | |
2653 | rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT | |
2654 | | SRP_BUF_FORMAT_INDIRECT); | |
2655 | ||
2656 | ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, | |
2657 | (void *)rej, sizeof *rej); | |
2658 | ||
2659 | out: | |
2660 | kfree(rep_param); | |
2661 | kfree(rsp); | |
2662 | kfree(rej); | |
2663 | ||
2664 | return ret; | |
2665 | } | |
2666 | ||
2667 | static void srpt_cm_rej_recv(struct ib_cm_id *cm_id) | |
2668 | { | |
2669 | printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id); | |
2670 | srpt_drain_channel(cm_id); | |
2671 | } | |
2672 | ||
2673 | /** | |
2674 | * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event. | |
2675 | * | |
2676 | * An IB_CM_RTU_RECEIVED message indicates that the connection is established | |
2677 | * and that the recipient may begin transmitting (RTU = ready to use). | |
2678 | */ | |
2679 | static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id) | |
2680 | { | |
2681 | struct srpt_rdma_ch *ch; | |
2682 | int ret; | |
2683 | ||
2684 | ch = srpt_find_channel(cm_id->context, cm_id); | |
2685 | BUG_ON(!ch); | |
2686 | ||
2687 | if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) { | |
2688 | struct srpt_recv_ioctx *ioctx, *ioctx_tmp; | |
2689 | ||
2690 | ret = srpt_ch_qp_rts(ch, ch->qp); | |
2691 | ||
2692 | list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list, | |
2693 | wait_list) { | |
2694 | list_del(&ioctx->wait_list); | |
2695 | srpt_handle_new_iu(ch, ioctx, NULL); | |
2696 | } | |
2697 | if (ret) | |
2698 | srpt_close_ch(ch); | |
2699 | } | |
2700 | } | |
2701 | ||
2702 | static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id) | |
2703 | { | |
2704 | printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id); | |
2705 | srpt_drain_channel(cm_id); | |
2706 | } | |
2707 | ||
2708 | static void srpt_cm_rep_error(struct ib_cm_id *cm_id) | |
2709 | { | |
2710 | printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id); | |
2711 | srpt_drain_channel(cm_id); | |
2712 | } | |
2713 | ||
2714 | /** | |
2715 | * srpt_cm_dreq_recv() - Process reception of a DREQ message. | |
2716 | */ | |
2717 | static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id) | |
2718 | { | |
2719 | struct srpt_rdma_ch *ch; | |
2720 | unsigned long flags; | |
2721 | bool send_drep = false; | |
2722 | ||
2723 | ch = srpt_find_channel(cm_id->context, cm_id); | |
2724 | BUG_ON(!ch); | |
2725 | ||
2726 | pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch)); | |
2727 | ||
2728 | spin_lock_irqsave(&ch->spinlock, flags); | |
2729 | switch (ch->state) { | |
2730 | case CH_CONNECTING: | |
2731 | case CH_LIVE: | |
2732 | send_drep = true; | |
2733 | ch->state = CH_DISCONNECTING; | |
2734 | break; | |
2735 | case CH_DISCONNECTING: | |
2736 | case CH_DRAINING: | |
2737 | case CH_RELEASING: | |
2738 | WARN(true, "unexpected channel state %d\n", ch->state); | |
2739 | break; | |
2740 | } | |
2741 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
2742 | ||
2743 | if (send_drep) { | |
2744 | if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0) | |
2745 | printk(KERN_ERR "Sending IB DREP failed.\n"); | |
2746 | printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n", | |
2747 | ch->sess_name); | |
2748 | } | |
2749 | } | |
2750 | ||
2751 | /** | |
2752 | * srpt_cm_drep_recv() - Process reception of a DREP message. | |
2753 | */ | |
2754 | static void srpt_cm_drep_recv(struct ib_cm_id *cm_id) | |
2755 | { | |
2756 | printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n", | |
2757 | cm_id); | |
2758 | srpt_drain_channel(cm_id); | |
2759 | } | |
2760 | ||
2761 | /** | |
2762 | * srpt_cm_handler() - IB connection manager callback function. | |
2763 | * | |
2764 | * A non-zero return value will cause the caller destroy the CM ID. | |
2765 | * | |
2766 | * Note: srpt_cm_handler() must only return a non-zero value when transferring | |
2767 | * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning | |
2768 | * a non-zero value in any other case will trigger a race with the | |
2769 | * ib_destroy_cm_id() call in srpt_release_channel(). | |
2770 | */ | |
2771 | static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |
2772 | { | |
2773 | int ret; | |
2774 | ||
2775 | ret = 0; | |
2776 | switch (event->event) { | |
2777 | case IB_CM_REQ_RECEIVED: | |
2778 | ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd, | |
2779 | event->private_data); | |
2780 | break; | |
2781 | case IB_CM_REJ_RECEIVED: | |
2782 | srpt_cm_rej_recv(cm_id); | |
2783 | break; | |
2784 | case IB_CM_RTU_RECEIVED: | |
2785 | case IB_CM_USER_ESTABLISHED: | |
2786 | srpt_cm_rtu_recv(cm_id); | |
2787 | break; | |
2788 | case IB_CM_DREQ_RECEIVED: | |
2789 | srpt_cm_dreq_recv(cm_id); | |
2790 | break; | |
2791 | case IB_CM_DREP_RECEIVED: | |
2792 | srpt_cm_drep_recv(cm_id); | |
2793 | break; | |
2794 | case IB_CM_TIMEWAIT_EXIT: | |
2795 | srpt_cm_timewait_exit(cm_id); | |
2796 | break; | |
2797 | case IB_CM_REP_ERROR: | |
2798 | srpt_cm_rep_error(cm_id); | |
2799 | break; | |
2800 | case IB_CM_DREQ_ERROR: | |
2801 | printk(KERN_INFO "Received IB DREQ ERROR event.\n"); | |
2802 | break; | |
2803 | case IB_CM_MRA_RECEIVED: | |
2804 | printk(KERN_INFO "Received IB MRA event\n"); | |
2805 | break; | |
2806 | default: | |
2807 | printk(KERN_ERR "received unrecognized IB CM event %d\n", | |
2808 | event->event); | |
2809 | break; | |
2810 | } | |
2811 | ||
2812 | return ret; | |
2813 | } | |
2814 | ||
2815 | /** | |
2816 | * srpt_perform_rdmas() - Perform IB RDMA. | |
2817 | * | |
2818 | * Returns zero upon success or a negative number upon failure. | |
2819 | */ | |
2820 | static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, | |
2821 | struct srpt_send_ioctx *ioctx) | |
2822 | { | |
2823 | struct ib_send_wr wr; | |
2824 | struct ib_send_wr *bad_wr; | |
2825 | struct rdma_iu *riu; | |
2826 | int i; | |
2827 | int ret; | |
2828 | int sq_wr_avail; | |
2829 | enum dma_data_direction dir; | |
2830 | const int n_rdma = ioctx->n_rdma; | |
2831 | ||
2832 | dir = ioctx->cmd.data_direction; | |
2833 | if (dir == DMA_TO_DEVICE) { | |
2834 | /* write */ | |
2835 | ret = -ENOMEM; | |
2836 | sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); | |
2837 | if (sq_wr_avail < 0) { | |
2838 | printk(KERN_WARNING "IB send queue full (needed %d)\n", | |
2839 | n_rdma); | |
2840 | goto out; | |
2841 | } | |
2842 | } | |
2843 | ||
2844 | ioctx->rdma_aborted = false; | |
2845 | ret = 0; | |
2846 | riu = ioctx->rdma_ius; | |
2847 | memset(&wr, 0, sizeof wr); | |
2848 | ||
2849 | for (i = 0; i < n_rdma; ++i, ++riu) { | |
2850 | if (dir == DMA_FROM_DEVICE) { | |
2851 | wr.opcode = IB_WR_RDMA_WRITE; | |
2852 | wr.wr_id = encode_wr_id(i == n_rdma - 1 ? | |
2853 | SRPT_RDMA_WRITE_LAST : | |
2854 | SRPT_RDMA_MID, | |
2855 | ioctx->ioctx.index); | |
2856 | } else { | |
2857 | wr.opcode = IB_WR_RDMA_READ; | |
2858 | wr.wr_id = encode_wr_id(i == n_rdma - 1 ? | |
2859 | SRPT_RDMA_READ_LAST : | |
2860 | SRPT_RDMA_MID, | |
2861 | ioctx->ioctx.index); | |
2862 | } | |
2863 | wr.next = NULL; | |
2864 | wr.wr.rdma.remote_addr = riu->raddr; | |
2865 | wr.wr.rdma.rkey = riu->rkey; | |
2866 | wr.num_sge = riu->sge_cnt; | |
2867 | wr.sg_list = riu->sge; | |
2868 | ||
2869 | /* only get completion event for the last rdma write */ | |
2870 | if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) | |
2871 | wr.send_flags = IB_SEND_SIGNALED; | |
2872 | ||
2873 | ret = ib_post_send(ch->qp, &wr, &bad_wr); | |
2874 | if (ret) | |
2875 | break; | |
2876 | } | |
2877 | ||
2878 | if (ret) | |
2879 | printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d", | |
2880 | __func__, __LINE__, ret, i, n_rdma); | |
2881 | if (ret && i > 0) { | |
2882 | wr.num_sge = 0; | |
2883 | wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); | |
2884 | wr.send_flags = IB_SEND_SIGNALED; | |
2885 | while (ch->state == CH_LIVE && | |
2886 | ib_post_send(ch->qp, &wr, &bad_wr) != 0) { | |
2887 | printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]", | |
2888 | ioctx->ioctx.index); | |
2889 | msleep(1000); | |
2890 | } | |
2891 | while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { | |
2892 | printk(KERN_INFO "Waiting until RDMA abort finished [%d]", | |
2893 | ioctx->ioctx.index); | |
2894 | msleep(1000); | |
2895 | } | |
2896 | } | |
2897 | out: | |
2898 | if (unlikely(dir == DMA_TO_DEVICE && ret < 0)) | |
2899 | atomic_add(n_rdma, &ch->sq_wr_avail); | |
2900 | return ret; | |
2901 | } | |
2902 | ||
2903 | /** | |
2904 | * srpt_xfer_data() - Start data transfer from initiator to target. | |
2905 | */ | |
2906 | static int srpt_xfer_data(struct srpt_rdma_ch *ch, | |
2907 | struct srpt_send_ioctx *ioctx) | |
2908 | { | |
2909 | int ret; | |
2910 | ||
2911 | ret = srpt_map_sg_to_ib_sge(ch, ioctx); | |
2912 | if (ret) { | |
2913 | printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret); | |
2914 | goto out; | |
2915 | } | |
2916 | ||
2917 | ret = srpt_perform_rdmas(ch, ioctx); | |
2918 | if (ret) { | |
2919 | if (ret == -EAGAIN || ret == -ENOMEM) | |
2920 | printk(KERN_INFO "%s[%d] queue full -- ret=%d\n", | |
2921 | __func__, __LINE__, ret); | |
2922 | else | |
2923 | printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n", | |
2924 | __func__, __LINE__, ret); | |
2925 | goto out_unmap; | |
2926 | } | |
2927 | ||
2928 | out: | |
2929 | return ret; | |
2930 | out_unmap: | |
2931 | srpt_unmap_sg_to_ib_sge(ch, ioctx); | |
2932 | goto out; | |
2933 | } | |
2934 | ||
2935 | static int srpt_write_pending_status(struct se_cmd *se_cmd) | |
2936 | { | |
2937 | struct srpt_send_ioctx *ioctx; | |
2938 | ||
2939 | ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); | |
2940 | return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA; | |
2941 | } | |
2942 | ||
2943 | /* | |
2944 | * srpt_write_pending() - Start data transfer from initiator to target (write). | |
2945 | */ | |
2946 | static int srpt_write_pending(struct se_cmd *se_cmd) | |
2947 | { | |
2948 | struct srpt_rdma_ch *ch; | |
2949 | struct srpt_send_ioctx *ioctx; | |
2950 | enum srpt_command_state new_state; | |
2951 | enum rdma_ch_state ch_state; | |
2952 | int ret; | |
2953 | ||
2954 | ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); | |
2955 | ||
2956 | new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); | |
2957 | WARN_ON(new_state == SRPT_STATE_DONE); | |
2958 | ||
2959 | ch = ioctx->ch; | |
2960 | BUG_ON(!ch); | |
2961 | ||
2962 | ch_state = srpt_get_ch_state(ch); | |
2963 | switch (ch_state) { | |
2964 | case CH_CONNECTING: | |
2965 | WARN(true, "unexpected channel state %d\n", ch_state); | |
2966 | ret = -EINVAL; | |
2967 | goto out; | |
2968 | case CH_LIVE: | |
2969 | break; | |
2970 | case CH_DISCONNECTING: | |
2971 | case CH_DRAINING: | |
2972 | case CH_RELEASING: | |
2973 | pr_debug("cmd with tag %lld: channel disconnecting\n", | |
2974 | ioctx->tag); | |
2975 | srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); | |
2976 | ret = -EINVAL; | |
2977 | goto out; | |
2978 | } | |
2979 | ret = srpt_xfer_data(ch, ioctx); | |
2980 | ||
2981 | out: | |
2982 | return ret; | |
2983 | } | |
2984 | ||
2985 | static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) | |
2986 | { | |
2987 | switch (tcm_mgmt_status) { | |
2988 | case TMR_FUNCTION_COMPLETE: | |
2989 | return SRP_TSK_MGMT_SUCCESS; | |
2990 | case TMR_FUNCTION_REJECTED: | |
2991 | return SRP_TSK_MGMT_FUNC_NOT_SUPP; | |
2992 | } | |
2993 | return SRP_TSK_MGMT_FAILED; | |
2994 | } | |
2995 | ||
2996 | /** | |
2997 | * srpt_queue_response() - Transmits the response to a SCSI command. | |
2998 | * | |
2999 | * Callback function called by the TCM core. Must not block since it can be | |
3000 | * invoked on the context of the IB completion handler. | |
3001 | */ | |
b79fafac | 3002 | static void srpt_queue_response(struct se_cmd *cmd) |
a42d985b BVA |
3003 | { |
3004 | struct srpt_rdma_ch *ch; | |
3005 | struct srpt_send_ioctx *ioctx; | |
3006 | enum srpt_command_state state; | |
3007 | unsigned long flags; | |
3008 | int ret; | |
3009 | enum dma_data_direction dir; | |
3010 | int resp_len; | |
3011 | u8 srp_tm_status; | |
3012 | ||
a42d985b BVA |
3013 | ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); |
3014 | ch = ioctx->ch; | |
3015 | BUG_ON(!ch); | |
3016 | ||
3017 | spin_lock_irqsave(&ioctx->spinlock, flags); | |
3018 | state = ioctx->state; | |
3019 | switch (state) { | |
3020 | case SRPT_STATE_NEW: | |
3021 | case SRPT_STATE_DATA_IN: | |
3022 | ioctx->state = SRPT_STATE_CMD_RSP_SENT; | |
3023 | break; | |
3024 | case SRPT_STATE_MGMT: | |
3025 | ioctx->state = SRPT_STATE_MGMT_RSP_SENT; | |
3026 | break; | |
3027 | default: | |
3028 | WARN(true, "ch %p; cmd %d: unexpected command state %d\n", | |
3029 | ch, ioctx->ioctx.index, ioctx->state); | |
3030 | break; | |
3031 | } | |
3032 | spin_unlock_irqrestore(&ioctx->spinlock, flags); | |
3033 | ||
3034 | if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) | |
3035 | || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { | |
3036 | atomic_inc(&ch->req_lim_delta); | |
3037 | srpt_abort_cmd(ioctx); | |
b79fafac | 3038 | return; |
a42d985b BVA |
3039 | } |
3040 | ||
3041 | dir = ioctx->cmd.data_direction; | |
3042 | ||
3043 | /* For read commands, transfer the data to the initiator. */ | |
3044 | if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length && | |
3045 | !ioctx->queue_status_only) { | |
3046 | ret = srpt_xfer_data(ch, ioctx); | |
3047 | if (ret) { | |
3048 | printk(KERN_ERR "xfer_data failed for tag %llu\n", | |
3049 | ioctx->tag); | |
b79fafac | 3050 | return; |
a42d985b BVA |
3051 | } |
3052 | } | |
3053 | ||
3054 | if (state != SRPT_STATE_MGMT) | |
3055 | resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, | |
3056 | cmd->scsi_status); | |
3057 | else { | |
3058 | srp_tm_status | |
3059 | = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); | |
3060 | resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, | |
3061 | ioctx->tag); | |
3062 | } | |
3063 | ret = srpt_post_send(ch, ioctx, resp_len); | |
3064 | if (ret) { | |
3065 | printk(KERN_ERR "sending cmd response failed for tag %llu\n", | |
3066 | ioctx->tag); | |
3067 | srpt_unmap_sg_to_ib_sge(ch, ioctx); | |
3068 | srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); | |
9474b043 | 3069 | target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); |
a42d985b | 3070 | } |
b79fafac | 3071 | } |
a42d985b | 3072 | |
b79fafac JE |
3073 | static int srpt_queue_data_in(struct se_cmd *cmd) |
3074 | { | |
3075 | srpt_queue_response(cmd); | |
3076 | return 0; | |
3077 | } | |
3078 | ||
3079 | static void srpt_queue_tm_rsp(struct se_cmd *cmd) | |
3080 | { | |
3081 | srpt_queue_response(cmd); | |
a42d985b BVA |
3082 | } |
3083 | ||
3084 | static int srpt_queue_status(struct se_cmd *cmd) | |
3085 | { | |
3086 | struct srpt_send_ioctx *ioctx; | |
3087 | ||
3088 | ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); | |
3089 | BUG_ON(ioctx->sense_data != cmd->sense_buffer); | |
3090 | if (cmd->se_cmd_flags & | |
3091 | (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE)) | |
3092 | WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION); | |
3093 | ioctx->queue_status_only = true; | |
b79fafac JE |
3094 | srpt_queue_response(cmd); |
3095 | return 0; | |
a42d985b BVA |
3096 | } |
3097 | ||
3098 | static void srpt_refresh_port_work(struct work_struct *work) | |
3099 | { | |
3100 | struct srpt_port *sport = container_of(work, struct srpt_port, work); | |
3101 | ||
3102 | srpt_refresh_port(sport); | |
3103 | } | |
3104 | ||
3105 | static int srpt_ch_list_empty(struct srpt_device *sdev) | |
3106 | { | |
3107 | int res; | |
3108 | ||
3109 | spin_lock_irq(&sdev->spinlock); | |
3110 | res = list_empty(&sdev->rch_list); | |
3111 | spin_unlock_irq(&sdev->spinlock); | |
3112 | ||
3113 | return res; | |
3114 | } | |
3115 | ||
3116 | /** | |
3117 | * srpt_release_sdev() - Free the channel resources associated with a target. | |
3118 | */ | |
3119 | static int srpt_release_sdev(struct srpt_device *sdev) | |
3120 | { | |
3121 | struct srpt_rdma_ch *ch, *tmp_ch; | |
3122 | int res; | |
3123 | ||
3124 | WARN_ON_ONCE(irqs_disabled()); | |
3125 | ||
3126 | BUG_ON(!sdev); | |
3127 | ||
3128 | spin_lock_irq(&sdev->spinlock); | |
3129 | list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) | |
3130 | __srpt_close_ch(ch); | |
3131 | spin_unlock_irq(&sdev->spinlock); | |
3132 | ||
3133 | res = wait_event_interruptible(sdev->ch_releaseQ, | |
3134 | srpt_ch_list_empty(sdev)); | |
3135 | if (res) | |
3136 | printk(KERN_ERR "%s: interrupted.\n", __func__); | |
3137 | ||
3138 | return 0; | |
3139 | } | |
3140 | ||
3141 | static struct srpt_port *__srpt_lookup_port(const char *name) | |
3142 | { | |
3143 | struct ib_device *dev; | |
3144 | struct srpt_device *sdev; | |
3145 | struct srpt_port *sport; | |
3146 | int i; | |
3147 | ||
3148 | list_for_each_entry(sdev, &srpt_dev_list, list) { | |
3149 | dev = sdev->device; | |
3150 | if (!dev) | |
3151 | continue; | |
3152 | ||
3153 | for (i = 0; i < dev->phys_port_cnt; i++) { | |
3154 | sport = &sdev->port[i]; | |
3155 | ||
3156 | if (!strcmp(sport->port_guid, name)) | |
3157 | return sport; | |
3158 | } | |
3159 | } | |
3160 | ||
3161 | return NULL; | |
3162 | } | |
3163 | ||
3164 | static struct srpt_port *srpt_lookup_port(const char *name) | |
3165 | { | |
3166 | struct srpt_port *sport; | |
3167 | ||
3168 | spin_lock(&srpt_dev_lock); | |
3169 | sport = __srpt_lookup_port(name); | |
3170 | spin_unlock(&srpt_dev_lock); | |
3171 | ||
3172 | return sport; | |
3173 | } | |
3174 | ||
3175 | /** | |
3176 | * srpt_add_one() - Infiniband device addition callback function. | |
3177 | */ | |
3178 | static void srpt_add_one(struct ib_device *device) | |
3179 | { | |
3180 | struct srpt_device *sdev; | |
3181 | struct srpt_port *sport; | |
3182 | struct ib_srq_init_attr srq_attr; | |
3183 | int i; | |
3184 | ||
3185 | pr_debug("device = %p, device->dma_ops = %p\n", device, | |
3186 | device->dma_ops); | |
3187 | ||
3188 | sdev = kzalloc(sizeof *sdev, GFP_KERNEL); | |
3189 | if (!sdev) | |
3190 | goto err; | |
3191 | ||
3192 | sdev->device = device; | |
3193 | INIT_LIST_HEAD(&sdev->rch_list); | |
3194 | init_waitqueue_head(&sdev->ch_releaseQ); | |
3195 | spin_lock_init(&sdev->spinlock); | |
3196 | ||
3197 | if (ib_query_device(device, &sdev->dev_attr)) | |
3198 | goto free_dev; | |
3199 | ||
3200 | sdev->pd = ib_alloc_pd(device); | |
3201 | if (IS_ERR(sdev->pd)) | |
3202 | goto free_dev; | |
3203 | ||
3204 | sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE); | |
3205 | if (IS_ERR(sdev->mr)) | |
3206 | goto err_pd; | |
3207 | ||
3208 | sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr); | |
3209 | ||
3210 | srq_attr.event_handler = srpt_srq_event; | |
3211 | srq_attr.srq_context = (void *)sdev; | |
3212 | srq_attr.attr.max_wr = sdev->srq_size; | |
3213 | srq_attr.attr.max_sge = 1; | |
3214 | srq_attr.attr.srq_limit = 0; | |
6f360336 | 3215 | srq_attr.srq_type = IB_SRQT_BASIC; |
a42d985b BVA |
3216 | |
3217 | sdev->srq = ib_create_srq(sdev->pd, &srq_attr); | |
3218 | if (IS_ERR(sdev->srq)) | |
3219 | goto err_mr; | |
3220 | ||
3221 | pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n", | |
3222 | __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr, | |
3223 | device->name); | |
3224 | ||
3225 | if (!srpt_service_guid) | |
3226 | srpt_service_guid = be64_to_cpu(device->node_guid); | |
3227 | ||
3228 | sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev); | |
3229 | if (IS_ERR(sdev->cm_id)) | |
3230 | goto err_srq; | |
3231 | ||
3232 | /* print out target login information */ | |
3233 | pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx," | |
3234 | "pkey=ffff,service_id=%016llx\n", srpt_service_guid, | |
3235 | srpt_service_guid, srpt_service_guid); | |
3236 | ||
3237 | /* | |
3238 | * We do not have a consistent service_id (ie. also id_ext of target_id) | |
3239 | * to identify this target. We currently use the guid of the first HCA | |
3240 | * in the system as service_id; therefore, the target_id will change | |
3241 | * if this HCA is gone bad and replaced by different HCA | |
3242 | */ | |
3243 | if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL)) | |
3244 | goto err_cm; | |
3245 | ||
3246 | INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, | |
3247 | srpt_event_handler); | |
3248 | if (ib_register_event_handler(&sdev->event_handler)) | |
3249 | goto err_cm; | |
3250 | ||
3251 | sdev->ioctx_ring = (struct srpt_recv_ioctx **) | |
3252 | srpt_alloc_ioctx_ring(sdev, sdev->srq_size, | |
3253 | sizeof(*sdev->ioctx_ring[0]), | |
3254 | srp_max_req_size, DMA_FROM_DEVICE); | |
3255 | if (!sdev->ioctx_ring) | |
3256 | goto err_event; | |
3257 | ||
3258 | for (i = 0; i < sdev->srq_size; ++i) | |
3259 | srpt_post_recv(sdev, sdev->ioctx_ring[i]); | |
3260 | ||
f225066b | 3261 | WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port)); |
a42d985b BVA |
3262 | |
3263 | for (i = 1; i <= sdev->device->phys_port_cnt; i++) { | |
3264 | sport = &sdev->port[i - 1]; | |
3265 | sport->sdev = sdev; | |
3266 | sport->port = i; | |
3267 | sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE; | |
3268 | sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE; | |
3269 | sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE; | |
3270 | INIT_WORK(&sport->work, srpt_refresh_port_work); | |
3271 | INIT_LIST_HEAD(&sport->port_acl_list); | |
3272 | spin_lock_init(&sport->port_acl_lock); | |
3273 | ||
3274 | if (srpt_refresh_port(sport)) { | |
3275 | printk(KERN_ERR "MAD registration failed for %s-%d.\n", | |
3276 | srpt_sdev_name(sdev), i); | |
3277 | goto err_ring; | |
3278 | } | |
3279 | snprintf(sport->port_guid, sizeof(sport->port_guid), | |
3280 | "0x%016llx%016llx", | |
3281 | be64_to_cpu(sport->gid.global.subnet_prefix), | |
3282 | be64_to_cpu(sport->gid.global.interface_id)); | |
3283 | } | |
3284 | ||
3285 | spin_lock(&srpt_dev_lock); | |
3286 | list_add_tail(&sdev->list, &srpt_dev_list); | |
3287 | spin_unlock(&srpt_dev_lock); | |
3288 | ||
3289 | out: | |
3290 | ib_set_client_data(device, &srpt_client, sdev); | |
3291 | pr_debug("added %s.\n", device->name); | |
3292 | return; | |
3293 | ||
3294 | err_ring: | |
3295 | srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, | |
3296 | sdev->srq_size, srp_max_req_size, | |
3297 | DMA_FROM_DEVICE); | |
3298 | err_event: | |
3299 | ib_unregister_event_handler(&sdev->event_handler); | |
3300 | err_cm: | |
3301 | ib_destroy_cm_id(sdev->cm_id); | |
3302 | err_srq: | |
3303 | ib_destroy_srq(sdev->srq); | |
3304 | err_mr: | |
3305 | ib_dereg_mr(sdev->mr); | |
3306 | err_pd: | |
3307 | ib_dealloc_pd(sdev->pd); | |
3308 | free_dev: | |
3309 | kfree(sdev); | |
3310 | err: | |
3311 | sdev = NULL; | |
3312 | printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name); | |
3313 | goto out; | |
3314 | } | |
3315 | ||
3316 | /** | |
3317 | * srpt_remove_one() - InfiniBand device removal callback function. | |
3318 | */ | |
3319 | static void srpt_remove_one(struct ib_device *device) | |
3320 | { | |
3321 | struct srpt_device *sdev; | |
3322 | int i; | |
3323 | ||
3324 | sdev = ib_get_client_data(device, &srpt_client); | |
3325 | if (!sdev) { | |
3326 | printk(KERN_INFO "%s(%s): nothing to do.\n", __func__, | |
3327 | device->name); | |
3328 | return; | |
3329 | } | |
3330 | ||
3331 | srpt_unregister_mad_agent(sdev); | |
3332 | ||
3333 | ib_unregister_event_handler(&sdev->event_handler); | |
3334 | ||
3335 | /* Cancel any work queued by the just unregistered IB event handler. */ | |
3336 | for (i = 0; i < sdev->device->phys_port_cnt; i++) | |
3337 | cancel_work_sync(&sdev->port[i].work); | |
3338 | ||
3339 | ib_destroy_cm_id(sdev->cm_id); | |
3340 | ||
3341 | /* | |
3342 | * Unregistering a target must happen after destroying sdev->cm_id | |
3343 | * such that no new SRP_LOGIN_REQ information units can arrive while | |
3344 | * destroying the target. | |
3345 | */ | |
3346 | spin_lock(&srpt_dev_lock); | |
3347 | list_del(&sdev->list); | |
3348 | spin_unlock(&srpt_dev_lock); | |
3349 | srpt_release_sdev(sdev); | |
3350 | ||
3351 | ib_destroy_srq(sdev->srq); | |
3352 | ib_dereg_mr(sdev->mr); | |
3353 | ib_dealloc_pd(sdev->pd); | |
3354 | ||
3355 | srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, | |
3356 | sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE); | |
3357 | sdev->ioctx_ring = NULL; | |
3358 | kfree(sdev); | |
3359 | } | |
3360 | ||
3361 | static struct ib_client srpt_client = { | |
3362 | .name = DRV_NAME, | |
3363 | .add = srpt_add_one, | |
3364 | .remove = srpt_remove_one | |
3365 | }; | |
3366 | ||
3367 | static int srpt_check_true(struct se_portal_group *se_tpg) | |
3368 | { | |
3369 | return 1; | |
3370 | } | |
3371 | ||
3372 | static int srpt_check_false(struct se_portal_group *se_tpg) | |
3373 | { | |
3374 | return 0; | |
3375 | } | |
3376 | ||
3377 | static char *srpt_get_fabric_name(void) | |
3378 | { | |
3379 | return "srpt"; | |
3380 | } | |
3381 | ||
3382 | static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg) | |
3383 | { | |
3384 | return SCSI_TRANSPORTID_PROTOCOLID_SRP; | |
3385 | } | |
3386 | ||
3387 | static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) | |
3388 | { | |
3389 | struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); | |
3390 | ||
3391 | return sport->port_guid; | |
3392 | } | |
3393 | ||
3394 | static u16 srpt_get_tag(struct se_portal_group *tpg) | |
3395 | { | |
3396 | return 1; | |
3397 | } | |
3398 | ||
3399 | static u32 srpt_get_default_depth(struct se_portal_group *se_tpg) | |
3400 | { | |
3401 | return 1; | |
3402 | } | |
3403 | ||
3404 | static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg, | |
3405 | struct se_node_acl *se_nacl, | |
3406 | struct t10_pr_registration *pr_reg, | |
3407 | int *format_code, unsigned char *buf) | |
3408 | { | |
3409 | struct srpt_node_acl *nacl; | |
3410 | struct spc_rdma_transport_id *tr_id; | |
3411 | ||
3412 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | |
3413 | tr_id = (void *)buf; | |
3414 | tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP; | |
3415 | memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id)); | |
3416 | return sizeof(*tr_id); | |
3417 | } | |
3418 | ||
3419 | static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg, | |
3420 | struct se_node_acl *se_nacl, | |
3421 | struct t10_pr_registration *pr_reg, | |
3422 | int *format_code) | |
3423 | { | |
3424 | *format_code = 0; | |
3425 | return sizeof(struct spc_rdma_transport_id); | |
3426 | } | |
3427 | ||
3428 | static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | |
3429 | const char *buf, u32 *out_tid_len, | |
3430 | char **port_nexus_ptr) | |
3431 | { | |
3432 | struct spc_rdma_transport_id *tr_id; | |
3433 | ||
3434 | *port_nexus_ptr = NULL; | |
3435 | *out_tid_len = sizeof(struct spc_rdma_transport_id); | |
3436 | tr_id = (void *)buf; | |
3437 | return (char *)tr_id->i_port_id; | |
3438 | } | |
3439 | ||
3440 | static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) | |
3441 | { | |
3442 | struct srpt_node_acl *nacl; | |
3443 | ||
3444 | nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); | |
3445 | if (!nacl) { | |
7367d99b | 3446 | printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n"); |
a42d985b BVA |
3447 | return NULL; |
3448 | } | |
3449 | ||
3450 | return &nacl->nacl; | |
3451 | } | |
3452 | ||
3453 | static void srpt_release_fabric_acl(struct se_portal_group *se_tpg, | |
3454 | struct se_node_acl *se_nacl) | |
3455 | { | |
3456 | struct srpt_node_acl *nacl; | |
3457 | ||
3458 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | |
3459 | kfree(nacl); | |
3460 | } | |
3461 | ||
3462 | static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) | |
3463 | { | |
3464 | return 1; | |
3465 | } | |
3466 | ||
3467 | static void srpt_release_cmd(struct se_cmd *se_cmd) | |
3468 | { | |
9474b043 NB |
3469 | struct srpt_send_ioctx *ioctx = container_of(se_cmd, |
3470 | struct srpt_send_ioctx, cmd); | |
3471 | struct srpt_rdma_ch *ch = ioctx->ch; | |
3472 | unsigned long flags; | |
3473 | ||
3474 | WARN_ON(ioctx->state != SRPT_STATE_DONE); | |
3475 | WARN_ON(ioctx->mapped_sg_count != 0); | |
3476 | ||
3477 | if (ioctx->n_rbuf > 1) { | |
3478 | kfree(ioctx->rbufs); | |
3479 | ioctx->rbufs = NULL; | |
3480 | ioctx->n_rbuf = 0; | |
3481 | } | |
3482 | ||
3483 | spin_lock_irqsave(&ch->spinlock, flags); | |
3484 | list_add(&ioctx->free_list, &ch->free_list); | |
3485 | spin_unlock_irqrestore(&ch->spinlock, flags); | |
a42d985b BVA |
3486 | } |
3487 | ||
a42d985b BVA |
3488 | /** |
3489 | * srpt_close_session() - Forcibly close a session. | |
3490 | * | |
3491 | * Callback function invoked by the TCM core to clean up sessions associated | |
3492 | * with a node ACL when the user invokes | |
3493 | * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id | |
3494 | */ | |
3495 | static void srpt_close_session(struct se_session *se_sess) | |
3496 | { | |
3497 | DECLARE_COMPLETION_ONSTACK(release_done); | |
3498 | struct srpt_rdma_ch *ch; | |
3499 | struct srpt_device *sdev; | |
3500 | int res; | |
3501 | ||
3502 | ch = se_sess->fabric_sess_ptr; | |
3503 | WARN_ON(ch->sess != se_sess); | |
3504 | ||
3505 | pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch)); | |
3506 | ||
3507 | sdev = ch->sport->sdev; | |
3508 | spin_lock_irq(&sdev->spinlock); | |
3509 | BUG_ON(ch->release_done); | |
3510 | ch->release_done = &release_done; | |
3511 | __srpt_close_ch(ch); | |
3512 | spin_unlock_irq(&sdev->spinlock); | |
3513 | ||
3514 | res = wait_for_completion_timeout(&release_done, 60 * HZ); | |
3515 | WARN_ON(res <= 0); | |
3516 | } | |
3517 | ||
a42d985b BVA |
3518 | /** |
3519 | * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB). | |
3520 | * | |
3521 | * A quote from RFC 4455 (SCSI-MIB) about this MIB object: | |
3522 | * This object represents an arbitrary integer used to uniquely identify a | |
3523 | * particular attached remote initiator port to a particular SCSI target port | |
3524 | * within a particular SCSI target device within a particular SCSI instance. | |
3525 | */ | |
3526 | static u32 srpt_sess_get_index(struct se_session *se_sess) | |
3527 | { | |
3528 | return 0; | |
3529 | } | |
3530 | ||
3531 | static void srpt_set_default_node_attrs(struct se_node_acl *nacl) | |
3532 | { | |
3533 | } | |
3534 | ||
3535 | static u32 srpt_get_task_tag(struct se_cmd *se_cmd) | |
3536 | { | |
3537 | struct srpt_send_ioctx *ioctx; | |
3538 | ||
3539 | ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); | |
3540 | return ioctx->tag; | |
3541 | } | |
3542 | ||
3543 | /* Note: only used from inside debug printk's by the TCM core. */ | |
3544 | static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) | |
3545 | { | |
3546 | struct srpt_send_ioctx *ioctx; | |
3547 | ||
3548 | ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); | |
3549 | return srpt_get_cmd_state(ioctx); | |
3550 | } | |
3551 | ||
a42d985b BVA |
3552 | /** |
3553 | * srpt_parse_i_port_id() - Parse an initiator port ID. | |
3554 | * @name: ASCII representation of a 128-bit initiator port ID. | |
3555 | * @i_port_id: Binary 128-bit port ID. | |
3556 | */ | |
3557 | static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) | |
3558 | { | |
3559 | const char *p; | |
3560 | unsigned len, count, leading_zero_bytes; | |
3561 | int ret, rc; | |
3562 | ||
3563 | p = name; | |
3564 | if (strnicmp(p, "0x", 2) == 0) | |
3565 | p += 2; | |
3566 | ret = -EINVAL; | |
3567 | len = strlen(p); | |
3568 | if (len % 2) | |
3569 | goto out; | |
3570 | count = min(len / 2, 16U); | |
3571 | leading_zero_bytes = 16 - count; | |
3572 | memset(i_port_id, 0, leading_zero_bytes); | |
3573 | rc = hex2bin(i_port_id + leading_zero_bytes, p, count); | |
3574 | if (rc < 0) | |
3575 | pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); | |
3576 | ret = 0; | |
3577 | out: | |
3578 | return ret; | |
3579 | } | |
3580 | ||
3581 | /* | |
3582 | * configfs callback function invoked for | |
3583 | * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id | |
3584 | */ | |
3585 | static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, | |
3586 | struct config_group *group, | |
3587 | const char *name) | |
3588 | { | |
3589 | struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); | |
3590 | struct se_node_acl *se_nacl, *se_nacl_new; | |
3591 | struct srpt_node_acl *nacl; | |
3592 | int ret = 0; | |
3593 | u32 nexus_depth = 1; | |
3594 | u8 i_port_id[16]; | |
3595 | ||
3596 | if (srpt_parse_i_port_id(i_port_id, name) < 0) { | |
3597 | printk(KERN_ERR "invalid initiator port ID %s\n", name); | |
3598 | ret = -EINVAL; | |
3599 | goto err; | |
3600 | } | |
3601 | ||
3602 | se_nacl_new = srpt_alloc_fabric_acl(tpg); | |
3603 | if (!se_nacl_new) { | |
3604 | ret = -ENOMEM; | |
3605 | goto err; | |
3606 | } | |
3607 | /* | |
3608 | * nacl_new may be released by core_tpg_add_initiator_node_acl() | |
3609 | * when converting a node ACL from demo mode to explict | |
3610 | */ | |
3611 | se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name, | |
3612 | nexus_depth); | |
3613 | if (IS_ERR(se_nacl)) { | |
3614 | ret = PTR_ERR(se_nacl); | |
3615 | goto err; | |
3616 | } | |
3617 | /* Locate our struct srpt_node_acl and set sdev and i_port_id. */ | |
3618 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | |
3619 | memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); | |
3620 | nacl->sport = sport; | |
3621 | ||
3622 | spin_lock_irq(&sport->port_acl_lock); | |
3623 | list_add_tail(&nacl->list, &sport->port_acl_list); | |
3624 | spin_unlock_irq(&sport->port_acl_lock); | |
3625 | ||
3626 | return se_nacl; | |
3627 | err: | |
3628 | return ERR_PTR(ret); | |
3629 | } | |
3630 | ||
3631 | /* | |
3632 | * configfs callback function invoked for | |
3633 | * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id | |
3634 | */ | |
3635 | static void srpt_drop_nodeacl(struct se_node_acl *se_nacl) | |
3636 | { | |
3637 | struct srpt_node_acl *nacl; | |
3638 | struct srpt_device *sdev; | |
3639 | struct srpt_port *sport; | |
3640 | ||
3641 | nacl = container_of(se_nacl, struct srpt_node_acl, nacl); | |
3642 | sport = nacl->sport; | |
3643 | sdev = sport->sdev; | |
3644 | spin_lock_irq(&sport->port_acl_lock); | |
3645 | list_del(&nacl->list); | |
3646 | spin_unlock_irq(&sport->port_acl_lock); | |
3647 | core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1); | |
3648 | srpt_release_fabric_acl(NULL, se_nacl); | |
3649 | } | |
3650 | ||
3651 | static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( | |
3652 | struct se_portal_group *se_tpg, | |
3653 | char *page) | |
3654 | { | |
3655 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3656 | ||
3657 | return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); | |
3658 | } | |
3659 | ||
3660 | static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( | |
3661 | struct se_portal_group *se_tpg, | |
3662 | const char *page, | |
3663 | size_t count) | |
3664 | { | |
3665 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3666 | unsigned long val; | |
3667 | int ret; | |
3668 | ||
9d8abf45 | 3669 | ret = kstrtoul(page, 0, &val); |
a42d985b | 3670 | if (ret < 0) { |
9d8abf45 | 3671 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
a42d985b BVA |
3672 | return -EINVAL; |
3673 | } | |
3674 | if (val > MAX_SRPT_RDMA_SIZE) { | |
3675 | pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val, | |
3676 | MAX_SRPT_RDMA_SIZE); | |
3677 | return -EINVAL; | |
3678 | } | |
3679 | if (val < DEFAULT_MAX_RDMA_SIZE) { | |
3680 | pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n", | |
3681 | val, DEFAULT_MAX_RDMA_SIZE); | |
3682 | return -EINVAL; | |
3683 | } | |
3684 | sport->port_attrib.srp_max_rdma_size = val; | |
3685 | ||
3686 | return count; | |
3687 | } | |
3688 | ||
3689 | TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR); | |
3690 | ||
3691 | static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size( | |
3692 | struct se_portal_group *se_tpg, | |
3693 | char *page) | |
3694 | { | |
3695 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3696 | ||
3697 | return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); | |
3698 | } | |
3699 | ||
3700 | static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( | |
3701 | struct se_portal_group *se_tpg, | |
3702 | const char *page, | |
3703 | size_t count) | |
3704 | { | |
3705 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3706 | unsigned long val; | |
3707 | int ret; | |
3708 | ||
9d8abf45 | 3709 | ret = kstrtoul(page, 0, &val); |
a42d985b | 3710 | if (ret < 0) { |
9d8abf45 | 3711 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
a42d985b BVA |
3712 | return -EINVAL; |
3713 | } | |
3714 | if (val > MAX_SRPT_RSP_SIZE) { | |
3715 | pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val, | |
3716 | MAX_SRPT_RSP_SIZE); | |
3717 | return -EINVAL; | |
3718 | } | |
3719 | if (val < MIN_MAX_RSP_SIZE) { | |
3720 | pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val, | |
3721 | MIN_MAX_RSP_SIZE); | |
3722 | return -EINVAL; | |
3723 | } | |
3724 | sport->port_attrib.srp_max_rsp_size = val; | |
3725 | ||
3726 | return count; | |
3727 | } | |
3728 | ||
3729 | TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR); | |
3730 | ||
3731 | static ssize_t srpt_tpg_attrib_show_srp_sq_size( | |
3732 | struct se_portal_group *se_tpg, | |
3733 | char *page) | |
3734 | { | |
3735 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3736 | ||
3737 | return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); | |
3738 | } | |
3739 | ||
3740 | static ssize_t srpt_tpg_attrib_store_srp_sq_size( | |
3741 | struct se_portal_group *se_tpg, | |
3742 | const char *page, | |
3743 | size_t count) | |
3744 | { | |
3745 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3746 | unsigned long val; | |
3747 | int ret; | |
3748 | ||
9d8abf45 | 3749 | ret = kstrtoul(page, 0, &val); |
a42d985b | 3750 | if (ret < 0) { |
9d8abf45 | 3751 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
a42d985b BVA |
3752 | return -EINVAL; |
3753 | } | |
3754 | if (val > MAX_SRPT_SRQ_SIZE) { | |
3755 | pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val, | |
3756 | MAX_SRPT_SRQ_SIZE); | |
3757 | return -EINVAL; | |
3758 | } | |
3759 | if (val < MIN_SRPT_SRQ_SIZE) { | |
3760 | pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val, | |
3761 | MIN_SRPT_SRQ_SIZE); | |
3762 | return -EINVAL; | |
3763 | } | |
3764 | sport->port_attrib.srp_sq_size = val; | |
3765 | ||
3766 | return count; | |
3767 | } | |
3768 | ||
3769 | TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR); | |
3770 | ||
3771 | static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { | |
3772 | &srpt_tpg_attrib_srp_max_rdma_size.attr, | |
3773 | &srpt_tpg_attrib_srp_max_rsp_size.attr, | |
3774 | &srpt_tpg_attrib_srp_sq_size.attr, | |
3775 | NULL, | |
3776 | }; | |
3777 | ||
3778 | static ssize_t srpt_tpg_show_enable( | |
3779 | struct se_portal_group *se_tpg, | |
3780 | char *page) | |
3781 | { | |
3782 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3783 | ||
3784 | return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0); | |
3785 | } | |
3786 | ||
3787 | static ssize_t srpt_tpg_store_enable( | |
3788 | struct se_portal_group *se_tpg, | |
3789 | const char *page, | |
3790 | size_t count) | |
3791 | { | |
3792 | struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); | |
3793 | unsigned long tmp; | |
3794 | int ret; | |
3795 | ||
9d8abf45 | 3796 | ret = kstrtoul(page, 0, &tmp); |
a42d985b BVA |
3797 | if (ret < 0) { |
3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); | |
3799 | return -EINVAL; | |
3800 | } | |
3801 | ||
3802 | if ((tmp != 0) && (tmp != 1)) { | |
3803 | printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp); | |
3804 | return -EINVAL; | |
3805 | } | |
3806 | if (tmp == 1) | |
3807 | sport->enabled = true; | |
3808 | else | |
3809 | sport->enabled = false; | |
3810 | ||
3811 | return count; | |
3812 | } | |
3813 | ||
3814 | TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR); | |
3815 | ||
3816 | static struct configfs_attribute *srpt_tpg_attrs[] = { | |
3817 | &srpt_tpg_enable.attr, | |
3818 | NULL, | |
3819 | }; | |
3820 | ||
3821 | /** | |
3822 | * configfs callback invoked for | |
3823 | * mkdir /sys/kernel/config/target/$driver/$port/$tpg | |
3824 | */ | |
3825 | static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, | |
3826 | struct config_group *group, | |
3827 | const char *name) | |
3828 | { | |
3829 | struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); | |
3830 | int res; | |
3831 | ||
3832 | /* Initialize sport->port_wwn and sport->port_tpg_1 */ | |
3833 | res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn, | |
3834 | &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); | |
3835 | if (res) | |
3836 | return ERR_PTR(res); | |
3837 | ||
3838 | return &sport->port_tpg_1; | |
3839 | } | |
3840 | ||
3841 | /** | |
3842 | * configfs callback invoked for | |
3843 | * rmdir /sys/kernel/config/target/$driver/$port/$tpg | |
3844 | */ | |
3845 | static void srpt_drop_tpg(struct se_portal_group *tpg) | |
3846 | { | |
3847 | struct srpt_port *sport = container_of(tpg, | |
3848 | struct srpt_port, port_tpg_1); | |
3849 | ||
3850 | sport->enabled = false; | |
3851 | core_tpg_deregister(&sport->port_tpg_1); | |
3852 | } | |
3853 | ||
3854 | /** | |
3855 | * configfs callback invoked for | |
3856 | * mkdir /sys/kernel/config/target/$driver/$port | |
3857 | */ | |
3858 | static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, | |
3859 | struct config_group *group, | |
3860 | const char *name) | |
3861 | { | |
3862 | struct srpt_port *sport; | |
3863 | int ret; | |
3864 | ||
3865 | sport = srpt_lookup_port(name); | |
3866 | pr_debug("make_tport(%s)\n", name); | |
3867 | ret = -EINVAL; | |
3868 | if (!sport) | |
3869 | goto err; | |
3870 | ||
3871 | return &sport->port_wwn; | |
3872 | ||
3873 | err: | |
3874 | return ERR_PTR(ret); | |
3875 | } | |
3876 | ||
3877 | /** | |
3878 | * configfs callback invoked for | |
3879 | * rmdir /sys/kernel/config/target/$driver/$port | |
3880 | */ | |
3881 | static void srpt_drop_tport(struct se_wwn *wwn) | |
3882 | { | |
3883 | struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); | |
3884 | ||
3885 | pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item)); | |
3886 | } | |
3887 | ||
3888 | static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf, | |
3889 | char *buf) | |
3890 | { | |
3891 | return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); | |
3892 | } | |
3893 | ||
3894 | TF_WWN_ATTR_RO(srpt, version); | |
3895 | ||
3896 | static struct configfs_attribute *srpt_wwn_attrs[] = { | |
3897 | &srpt_wwn_version.attr, | |
3898 | NULL, | |
3899 | }; | |
3900 | ||
3901 | static struct target_core_fabric_ops srpt_template = { | |
3902 | .get_fabric_name = srpt_get_fabric_name, | |
3903 | .get_fabric_proto_ident = srpt_get_fabric_proto_ident, | |
3904 | .tpg_get_wwn = srpt_get_fabric_wwn, | |
3905 | .tpg_get_tag = srpt_get_tag, | |
3906 | .tpg_get_default_depth = srpt_get_default_depth, | |
3907 | .tpg_get_pr_transport_id = srpt_get_pr_transport_id, | |
3908 | .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len, | |
3909 | .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id, | |
3910 | .tpg_check_demo_mode = srpt_check_false, | |
3911 | .tpg_check_demo_mode_cache = srpt_check_true, | |
3912 | .tpg_check_demo_mode_write_protect = srpt_check_true, | |
3913 | .tpg_check_prod_mode_write_protect = srpt_check_false, | |
3914 | .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl, | |
3915 | .tpg_release_fabric_acl = srpt_release_fabric_acl, | |
3916 | .tpg_get_inst_index = srpt_tpg_get_inst_index, | |
3917 | .release_cmd = srpt_release_cmd, | |
3918 | .check_stop_free = srpt_check_stop_free, | |
3919 | .shutdown_session = srpt_shutdown_session, | |
3920 | .close_session = srpt_close_session, | |
a42d985b BVA |
3921 | .sess_get_index = srpt_sess_get_index, |
3922 | .sess_get_initiator_sid = NULL, | |
3923 | .write_pending = srpt_write_pending, | |
3924 | .write_pending_status = srpt_write_pending_status, | |
3925 | .set_default_node_attributes = srpt_set_default_node_attrs, | |
3926 | .get_task_tag = srpt_get_task_tag, | |
3927 | .get_cmd_state = srpt_get_tcm_cmd_state, | |
b79fafac | 3928 | .queue_data_in = srpt_queue_data_in, |
a42d985b | 3929 | .queue_status = srpt_queue_status, |
b79fafac | 3930 | .queue_tm_rsp = srpt_queue_tm_rsp, |
a42d985b BVA |
3931 | /* |
3932 | * Setup function pointers for generic logic in | |
3933 | * target_core_fabric_configfs.c | |
3934 | */ | |
3935 | .fabric_make_wwn = srpt_make_tport, | |
3936 | .fabric_drop_wwn = srpt_drop_tport, | |
3937 | .fabric_make_tpg = srpt_make_tpg, | |
3938 | .fabric_drop_tpg = srpt_drop_tpg, | |
3939 | .fabric_post_link = NULL, | |
3940 | .fabric_pre_unlink = NULL, | |
3941 | .fabric_make_np = NULL, | |
3942 | .fabric_drop_np = NULL, | |
3943 | .fabric_make_nodeacl = srpt_make_nodeacl, | |
3944 | .fabric_drop_nodeacl = srpt_drop_nodeacl, | |
3945 | }; | |
3946 | ||
3947 | /** | |
3948 | * srpt_init_module() - Kernel module initialization. | |
3949 | * | |
3950 | * Note: Since ib_register_client() registers callback functions, and since at | |
3951 | * least one of these callback functions (srpt_add_one()) calls target core | |
3952 | * functions, this driver must be registered with the target core before | |
3953 | * ib_register_client() is called. | |
3954 | */ | |
3955 | static int __init srpt_init_module(void) | |
3956 | { | |
3957 | int ret; | |
3958 | ||
3959 | ret = -EINVAL; | |
3960 | if (srp_max_req_size < MIN_MAX_REQ_SIZE) { | |
3961 | printk(KERN_ERR "invalid value %d for kernel module parameter" | |
3962 | " srp_max_req_size -- must be at least %d.\n", | |
3963 | srp_max_req_size, MIN_MAX_REQ_SIZE); | |
3964 | goto out; | |
3965 | } | |
3966 | ||
3967 | if (srpt_srq_size < MIN_SRPT_SRQ_SIZE | |
3968 | || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { | |
3969 | printk(KERN_ERR "invalid value %d for kernel module parameter" | |
3970 | " srpt_srq_size -- must be in the range [%d..%d].\n", | |
3971 | srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); | |
3972 | goto out; | |
3973 | } | |
3974 | ||
a42d985b | 3975 | srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); |
3af33637 | 3976 | if (IS_ERR(srpt_target)) { |
a42d985b | 3977 | printk(KERN_ERR "couldn't register\n"); |
3af33637 | 3978 | ret = PTR_ERR(srpt_target); |
a42d985b BVA |
3979 | goto out; |
3980 | } | |
3981 | ||
3982 | srpt_target->tf_ops = srpt_template; | |
3983 | ||
a42d985b BVA |
3984 | /* |
3985 | * Set up default attribute lists. | |
3986 | */ | |
3987 | srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs; | |
3988 | srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs; | |
3989 | srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs; | |
3990 | srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | |
3991 | srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | |
3992 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; | |
3993 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; | |
3994 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; | |
3995 | srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; | |
3996 | ||
3997 | ret = target_fabric_configfs_register(srpt_target); | |
3998 | if (ret < 0) { | |
3999 | printk(KERN_ERR "couldn't register\n"); | |
4000 | goto out_free_target; | |
4001 | } | |
4002 | ||
4003 | ret = ib_register_client(&srpt_client); | |
4004 | if (ret) { | |
4005 | printk(KERN_ERR "couldn't register IB client\n"); | |
4006 | goto out_unregister_target; | |
4007 | } | |
4008 | ||
4009 | return 0; | |
4010 | ||
4011 | out_unregister_target: | |
4012 | target_fabric_configfs_deregister(srpt_target); | |
4013 | srpt_target = NULL; | |
4014 | out_free_target: | |
4015 | if (srpt_target) | |
4016 | target_fabric_configfs_free(srpt_target); | |
4017 | out: | |
4018 | return ret; | |
4019 | } | |
4020 | ||
4021 | static void __exit srpt_cleanup_module(void) | |
4022 | { | |
4023 | ib_unregister_client(&srpt_client); | |
4024 | target_fabric_configfs_deregister(srpt_target); | |
4025 | srpt_target = NULL; | |
4026 | } | |
4027 | ||
4028 | module_init(srpt_init_module); | |
4029 | module_exit(srpt_cleanup_module); |