/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
#include "nvmet.h"
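
/*
 * The Get Log Page transfer length is split across two 16-bit fields:
 * NUMDU holds the upper and NUMDL the lower half of NUMD, a 0's based
 * count of dwords to transfer.
 */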
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}
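
/*
 * Build the payload for the Error Information (01h), SMART / Health
 * Information (02h), or Firmware Slot Information (03h) log page.  All of
 * them are currently returned zero-filled, which the comments below argue
 * is still spec-compliant for this minimal target.
 */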
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	void *buf;
	u16 status = 0;

	buf = kzalloc(data_len, GFP_KERNEL);
	if (!buf) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.lid) {
	case 0x01:
		/*
		 * We currently never set the More bit in the status field,
		 * so all error log entries are invalid and can be zeroed out.
		 * This is called a minimum viable implementation (TM) of this
		 * mandatory log page.
		 */
		break;
	case 0x02:
		/*
		 * XXX: fill out actual smart log
		 *
		 * We might have a hard time coming up with useful values for
		 * many of the fields, and even when we have useful data
		 * available (e.g. units or commands read/written) those aren't
		 * persistent over power loss.
		 */
		break;
	case 0x03:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		break;
	default:
		BUG();
	}

	status = nvmet_copy_to_sgl(req, 0, buf, data_len);

	kfree(buf);
out:
	nvmet_req_complete(req, status);
}
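
/*
 * Identify Controller: the 4096-byte nvme_id_ctrl structure is allocated
 * zeroed, so any field not explicitly set below is reported as zero.
 */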
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}
	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);

	memset(id->mn, ' ', sizeof(id->mn));
	strncpy((char *)id->mn, "Linux", sizeof(id->mn));

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	id->rab = 6;
	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports and multiple hosts: */
	id->cmic = (1 << 0) | (1 << 1);
	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
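	/*
	 * OAES bit 8 advertises Namespace Attribute Notices; CTRATT bit 0
	 * advertises 128-bit Host Identifier support.
	 */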
	id->oaes = cpu_to_le32(1 << 8);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;
	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;
	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);
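
	/*
	 * SQES/CQES encode the maximum entry size in the upper nibble and
	 * the required (minimum) size in the lower one, each as a power of
	 * two: 64-byte SQEs and 16-byte CQEs only.
	 */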
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;
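
	/*
	 * SGLS: bit 0 indicates SGLs are supported for data transfers, bit 2
	 * the keyed SGL data block descriptor, and bit 20 (set only when the
	 * transport can carry in-capsule data) SGL offsets.
	 */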
	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
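
	/* IOCCSZ and IORCSZ are both expressed in units of 16 bytes. */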
	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  ctrl->ops->sqe_inline_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;
	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nuse = id->nsze =
		cpu_to_le64(ns->size >> ns->blksize_shift);
	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);

	memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));

	id->lbaf[0].ds = ns->blksize_shift;
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = 4096;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}
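
	/*
	 * Per the Identify (CNS 02h) definition, return the active NSIDs
	 * greater than the one in the command, in increasing order, up to
	 * one page worth of them.
	 */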
	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
	kfree(list);
out:
	nvmet_req_complete(req, status);
}
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't successful.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
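	/* dword 0 bit 0 set in the result means the command was not aborted */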
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}
static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u64 val;
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xf) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
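	/*
	 * The KATO value is given in milliseconds; the target keeps it in
	 * seconds, rounded up.
	 */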
	case NVME_FEAT_KATO:
		val = le64_to_cpu(req->cmd->prop_set.value);
		val32 = val & 0xffff;
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xf) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
	case NVME_FEAT_ASYNC_EVENT:
		break;
#endif
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}
	nvmet_req_complete(req, status);
}
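
/*
 * Async Event Request commands are not completed here: up to
 * NVMET_ASYNC_EVENTS of them are parked on the controller and completed
 * later by ctrl->async_event_work once an event is pending.
 */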
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}
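
/*
 * Validate an admin command and set up req->execute and req->data_len for
 * it.  Returns 0 on success or an NVMe status code the caller should use
 * to fail the command.
 */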
int nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
				cmd->common.opcode);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case 0x01:
		case 0x02:
		case 0x03:
			req->execute = nvmet_execute_get_log_page;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = 4096;
		switch (le32_to_cpu(cmd->identify.cns)) {
		case 0x00:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case 0x01:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case 0x02:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}
	pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}