/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include "nvmet.h"

static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or
 * checking a host-subsystem link) the read lock is obtained to allow
 * concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

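/*
 * Helpers to copy between a kernel buffer and a command's data SGL.  A
 * short copy means the SGL cannot hold the requested transfer, which is
 * reported back as an SGL error with the do-not-retry bit set.
 */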
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

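/*
 * Pack an AEN into the 32-bit completion result dword: event type in
 * byte 0, event information in byte 1, associated log page in byte 2.
 */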
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

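/*
 * Deliver queued AENs to the host.  Each pending event consumes one
 * outstanding AER command; we stop as soon as either runs out, and any
 * remaining events stay queued until the host posts another AER.
 */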
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

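/*
 * Transport drivers register their ops here, keyed by transport type.
 * The slot is claimed under the config semaphore, so registration cannot
 * race with port enable/disable.
 */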
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

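/*
 * Look up the transport for this port, autoloading its module if it is
 * not registered yet.  The config semaphore must be dropped around
 * request_module() because the transport module's init routine re-takes
 * it when registering its ops; holding it across the modprobe would
 * deadlock.
 */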
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

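/*
 * If the keep-alive timer expires the host is presumed dead and the
 * controller is torn down through the transport's delete_ctrl callback.
 */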
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	ctrl->ops->delete_ctrl(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

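/*
 * Namespace lookup walks the RCU-protected, nsid-sorted per-subsystem
 * list.  nvmet_find_namespace() additionally takes a percpu reference,
 * which the caller must drop via nvmet_put_namespace().
 */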
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

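/*
 * Enable a namespace: open its backing block device, initialize the
 * percpu reference used for request tracking, insert it nsid-sorted
 * into the subsystem's namespace list, and notify every controller of
 * the change via an AEN.
 */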
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (!list_empty(&ns->dev_link))
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		/* pr_fmt already prefixes "nvmet: ", so don't repeat it here */
		pr_err("failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (list_empty(&ns->dev_link)) {
		mutex_unlock(&subsys->lock);
		return;
	}
	list_del_init(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

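/*
 * Tear down a submission queue: complete any outstanding AERs if this is
 * the admin queue, kill the queue's percpu reference so no new requests
 * can enter, then wait for in-flight requests to finish before releasing
 * it.
 */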
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill(&sq->ref);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

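/*
 * Validate and initialize a newly received request, then dispatch it to
 * the appropriate parser: connect handling until a controller has been
 * established, I/O command parsing for non-admin queues, and fabrics,
 * discovery or admin parsing on the admin queue.  On failure the request
 * is completed with an error status and false is returned.
 */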
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

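/*
 * Accessors for the fields of the Controller Configuration (CC)
 * register: EN (bit 0), CSS (bits 6:4), MPS (bits 10:7), AMS (bits
 * 13:11), SHN (bits 15:14), IOSQES (bits 19:16) and IOCQES (bits 23:20).
 */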
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

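/*
 * Apply a host write to CC: a 0->1 transition of EN starts the
 * controller, a 1->0 transition resets it, and a shutdown notification
 * tears the controller down and latches SHST_CMPLT in CSTS.
 */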
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

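/*
 * Advertise the controller's capabilities in the CAP register: the
 * supported command set in CSS (bits 44:37), the CC.EN timeout in TO
 * (bits 31:24, in 500ms units) and the queue depth in MQES (bits 15:0,
 * zero-based).
 */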
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

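/*
 * Look up an existing controller by cntlid so a host can attach further
 * queues to it, verifying that the connecting host actually owns the
 * controller.  On success a reference is taken that the caller must drop
 * with nvmet_ctrl_put().
 */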
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

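/*
 * Allocate a new controller for a Connect command: check that the host
 * may access the subsystem, set up AEN handling, the keep-alive timer
 * and the per-queue pointer arrays, and assign a free cntlid.  Errors
 * are returned as NVMe status values suitable for the Connect response.
 */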
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&subsys->cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base spec draft:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	ctrl->csts |= NVME_CSTS_CFS;
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	schedule_work(&ctrl->fatal_err_work);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

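/*
 * Resolve a subsystem NQN to a referenced subsystem.  The well-known
 * discovery NQN always maps to the discovery subsystem; any other NQN
 * must be linked to the port the connection arrived on.
 */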
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);

	ida_init(&subsys->cntlid_ida);

	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	ida_destroy(&subsys->cntlid_ida);
	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");