/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

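/*
 * The cmdid_data[] flexible array is laid out as a bitmap of q_depth bits
 * (one per outstanding command ID) followed by an array of
 * struct nvme_cmd_info, one entry per command ID.  nvme_cmd_info() below
 * returns a pointer to that second part, and nvme_alloc_queue() sizes the
 * allocation as (depth / 8) + (depth * sizeof(struct nvme_cmd_info))
 * accordingly.
 */
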
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;

	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/*
 * If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

/* Special values must be a multiple of 4, and less than 0x1000 */
#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

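/*
 * A command's ctx is stored as a pointer with the handler index packed into
 * its two low bits (hence the 4-byte alignment check in alloc_cmdid).  The
 * CMD_CTX_* special values are built from POISON_POINTER_DELTA so they can
 * never alias a real ctx pointer, and they are multiples of 4 so their low
 * bits still decode to handler 0 (sync_completion), which recognises and
 * ignores them.
 */
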
/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return data;
}

static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	return ns->dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

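/*
 * Queue 0 is reserved for admin commands; I/O queues start at index 1 and
 * are chosen per-CPU, hence the get_cpu() + 1 above.  get_cpu() disables
 * preemption, so every get_nvmeq() must be paired with put_nvmeq().
 */
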
/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

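/*
 * Submission works by copying the 64-byte command into the next free slot of
 * the host-memory submission queue and then writing the new tail index to
 * the queue's doorbell register (q_db); the controller fetches commands up
 * to, but not including, the tail.  The tail wraps to zero at q_depth.
 */
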
struct nvme_prps {
	int npages;		/* 0 means the small pool was used */
	dma_addr_t first_dma;
	__le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		bio_list_add(&nvmeq->sq_cong, bio);
		wake_up_process(nvme_thread);
	} else {
		bio_endio(bio, 0);
	}
}

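/*
 * A bio whose bi_idx is still short of bi_vcnt was only partially submitted
 * (nvme_map_bio stops at the first bio_vec that cannot be virtually merged),
 * so the remainder is queued on sq_cong and the nvme kthread is woken to
 * resubmit it via nvme_resubmit_bios().
 */
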
/* length is in bytes.  gfp flags indicates whether we may sleep. */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int *len,
					gfp_t gfp)
{
	struct dma_pool *pool;
	int length = *len;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
	if (!prps) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		return prps;
	}

	prp_page = 0;
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		kfree(prps);
		return NULL;
	}
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list) {
				*len = (*len - length);
				return prps;
			}
			prps->list[prp_page++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

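/*
 * PRP (Physical Region Page) entries describe the data buffer: prp1 always
 * points at the first page of data, and prp2 is either the second (and last)
 * page for transfers of up to two pages, or the bus address of a PRP list
 * page holding one 8-byte entry per further page.  When a transfer needs
 * more entries than fit in one list page, the last slot of each list page is
 * reused as a link to the next one, which is the chain nvme_free_prps()
 * walks when tearing it down.
 */
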
/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(nbio->sg, psegs);

	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	nbio->nents = nsegs;
	if (dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					sync_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs);
	if (result < 0)
		goto free_nbio;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
							&length, GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

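/*
 * bi_sector is expressed in 512-byte sectors, so the starting LBA is
 * obtained by shifting right by (lba_shift - 9).  The length field of an
 * NVMe read/write command is zero-based, hence the "- 1" above when
 * converting the mapped byte count into a block count.
 */
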
/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
		return;
	if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

static const completion_fn nvme_completions[4] = {
	[sync_completion_id] = sync_completion,
	[bio_completion_id]  = bio_completion,
};

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

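/*
 * New completion entries are detected with the phase tag: the controller
 * inverts the phase bit it writes each time it wraps around the completion
 * queue, so an entry belongs to the current pass only while its phase bit
 * matches cq_phase.  Once the consumer wraps, the expected phase is flipped
 * to match.
 */
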
static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

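/*
 * With use_threaded_interrupts set, nvme_irq_check() runs as the hard irq
 * handler: it only peeks at the phase bit of the current completion entry,
 * returning IRQ_NONE when nothing is pending and IRQ_WAKE_THREAD otherwise,
 * so that nvme_irq() does the real completion processing in thread context.
 */
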
static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, int length,
			struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_user_admin_command(struct nvme_dev *dev,
					unsigned long addr, unsigned length,
					struct nvme_command *cmd)
{
	int err, nents, tmplen = length;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
	if (nents < 0)
		return nents;
	prps = nvme_setup_prps(dev, &cmd->common, sg, &tmplen, GFP_KERNEL);
	if (tmplen != length)
		err = -ENOMEM;
	else
		err = nvme_submit_admin_cmd(dev, cmd, NULL);
	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return err ? -EIO : 0;
}

static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
		nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
								length, &sg);
		break;
	default:
		return -EINVAL;
	}

	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;

	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);

	nvmeq = get_nvmeq(ns);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_download_firmware(struct nvme_ns *ns,
						struct nvme_dlfw __user *udlfw)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_dlfw dlfw;
	struct nvme_command c;
	int nents, status, length;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
		return -EFAULT;
	if (dlfw.length >= (1 << 30))
		return -EINVAL;
	length = dlfw.length * 4;

	nents = nvme_map_user_pages(dev, 1, dlfw.addr, length, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.dlfw.opcode = nvme_admin_download_fw;
	c.dlfw.numd = cpu_to_le32(dlfw.length);
	c.dlfw.offset = cpu_to_le32(dlfw.offset);
	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
	if (length != dlfw.length * 4)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);
	nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_activate_fw;
	c.common.rsvd10[0] = cpu_to_le32(arg);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case NVME_IOCTL_DOWNLOAD_FW:
		return nvme_download_firmware(ns, (void __user *)arg);
	case NVME_IOCTL_ACTIVATE_FW:
		return nvme_activate_firmware(ns, arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
		};

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		data = cancel_cmdid(nvmeq, cmdid);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

*ns
)
1357 int index
= ns
->disk
->first_minor
/ NVME_MINORS
;
1359 nvme_put_ns_idx(index
);
1360 blk_cleanup_queue(ns
->queue
);
1364 static int set_queue_count(struct nvme_dev
*dev
, int count
)
1368 struct nvme_command c
;
1369 u32 q_count
= (count
- 1) | ((count
- 1) << 16);
1371 memset(&c
, 0, sizeof(c
));
1372 c
.features
.opcode
= nvme_admin_get_features
;
1373 c
.features
.fid
= cpu_to_le32(NVME_FEAT_NUM_QUEUES
);
1374 c
.features
.dword11
= cpu_to_le32(q_count
);
1376 status
= nvme_submit_admin_cmd(dev
, &c
, &result
);
1379 return min(result
& 0xffff, result
>> 16) + 1;
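/*
 * The Number of Queues feature reports the submission and completion queue
 * counts as zero-based values packed into the low and high halves of the
 * result dword, hence min(result & 0xffff, result >> 16) + 1 as the usable
 * I/O queue count.
 */
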
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

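/*
 * One I/O queue (and one MSI-X vector, affined to that CPU) is requested per
 * online CPU.  If the controller or MSI-X grants fewer queues than CPUs,
 * the trailing loop maps the remaining possible CPUs onto the existing
 * queues modulo a power of two, so get_nvmeq() always finds a valid queue.
 */
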
static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i <= nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

nvme_probe(struct pci_dev
*pdev
,
1577 const struct pci_device_id
*id
)
1579 int bars
, result
= -ENOMEM
;
1580 struct nvme_dev
*dev
;
1582 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
1585 dev
->entry
= kcalloc(num_possible_cpus(), sizeof(*dev
->entry
),
1589 dev
->queues
= kcalloc(num_possible_cpus() + 1, sizeof(void *),
1594 if (pci_enable_device_mem(pdev
))
1596 pci_set_master(pdev
);
1597 bars
= pci_select_bars(pdev
, IORESOURCE_MEM
);
1598 if (pci_request_selected_regions(pdev
, bars
, "nvme"))
1601 INIT_LIST_HEAD(&dev
->namespaces
);
1602 dev
->pci_dev
= pdev
;
1603 pci_set_drvdata(pdev
, dev
);
1604 dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64));
1605 dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64));
1606 nvme_set_instance(dev
);
1607 dev
->entry
[0].vector
= pdev
->irq
;
1609 result
= nvme_setup_prp_pools(dev
);
1613 dev
->bar
= ioremap(pci_resource_start(pdev
, 0), 8192);
1619 result
= nvme_configure_admin_queue(dev
);
1624 spin_lock(&dev_list_lock
);
1625 list_add(&dev
->node
, &dev_list
);
1626 spin_unlock(&dev_list_lock
);
1628 result
= nvme_dev_add(dev
);
1635 spin_lock(&dev_list_lock
);
1636 list_del(&dev
->node
);
1637 spin_unlock(&dev_list_lock
);
1639 nvme_free_queues(dev
);
1643 pci_disable_msix(pdev
);
1644 nvme_release_instance(dev
);
1645 nvme_release_prp_pools(dev
);
1647 pci_disable_device(pdev
);
1648 pci_release_regions(pdev
);
static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

1689 #define PCI_CLASS_STORAGE_EXPRESS 0x010802
1691 static DEFINE_PCI_DEVICE_TABLE(nvme_id_table
) = {
1692 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS
, 0xffffff) },
1695 MODULE_DEVICE_TABLE(pci
, nvme_id_table
);
static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");
module_init(nvme_init);
module_exit(nvme_exit);