/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include "nvmet.h"
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
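/*
 * Illustrative usage sketch, not part of the target core proper: configfs
 * callbacks that change the port/subsystem/host topology take the semaphore
 * for writing, while readers such as the connect path take it for reading.
 * The "link" below is a hypothetical nvmet_subsys_link used only to show the
 * pattern:
 *
 *	down_write(&nvmet_config_sem);
 *	list_add_tail(&link->entry, &port->subsystems);
 *	up_write(&nvmet_config_sem);
 *
 *	down_read(&nvmet_config_sem);
 *	allowed = nvmet_host_allowed(req, subsys, hostnqn);
 *	up_read(&nvmet_config_sem);
 */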
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
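/*
 * A worked example of the packing above, assuming the NVMe-defined layout of
 * Completion Queue Entry Dword 0 for Asynchronous Event Requests (event type
 * in bits 2:0, event information in bits 15:8, log page identifier in bits
 * 23:16): an AEN queued as nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 * 0, 0) yields a result of 0x000002, i.e. a Notice event with event
 * information 0 and log page identifier 0.
 */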
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}
void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	ctrl->ops->delete_ctrl(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}
void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (!list_empty(&ns->dev_link))
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("nvmet: failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand
	 * (a worked example follows this function).
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}
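/*
 * Worked example of the sorted insert above (illustrative): with namespaces
 * 1, 2 and 5 already enabled, enabling nsid 3 stops the walk at the first
 * larger nsid (5) and links the new entry in front of it, giving 1, 2, 3, 5.
 * If no larger nsid exists (e.g. enabling nsid 7), the loop runs through to
 * the list head and the entry ends up appended at the tail, so the Identify
 * Namespace List data is always returned in ascending order.
 */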
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (list_empty(&ns->dev_link)) {
		mutex_unlock(&subsys->lock);
		return;
	}
	list_del_init(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
	mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill(&sq->ref);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}
int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->ns = NULL;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}
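/*
 * Worked example of the CC decoding above (illustrative): a host enabling
 * the controller typically writes CC = 0x00460001, which decodes as
 * nvmet_cc_en() = 1, nvmet_cc_iosqes() = 6 (64-byte SQEs) and
 * nvmet_cc_iocqes() = 4 (16-byte CQEs), with CSS, MPS, AMS and SHN all zero.
 * Those are exactly the values nvmet_start_ctrl() below accepts
 * (NVME_NVM_IOSQES == 6, NVME_NVM_IOCQES == 4).
 */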
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
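/*
 * For reference, and assuming NVMET_QUEUE_SIZE is 1024, the value built above
 * decodes (per the NVMe CAP register layout) as MQES = 1023 in bits 15:0
 * (queues of up to 1024 entries, 0's based), TO = 15 in bits 31:24 (a
 * 7.5 second CC.EN timeout) and bit 37 set in the CSS field (NVM command set
 * supported).
 */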
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}
static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&subsys->cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to cleanup stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	ctrl->csts |= NVME_CSTS_CFS;
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	schedule_work(&ctrl->fatal_err_work);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);

	ida_init(&subsys->cntlid_ida);

	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	ida_destroy(&subsys->cntlid_ida);
	kfree(subsys->subsysnqn);
	kfree(subsys);
}
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");