/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	size_t			size;
	struct badblocks	bb;
};

static int pmem_major;
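
/*
 * Test whether any part of the I/O range intersects the namespace's
 * known poison list. 'len' is in bytes; badblocks_check() works in
 * 512-byte sectors.
 */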
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
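
/*
 * Transfer one bio_vec worth of data between a page and persistent
 * memory. Reads from ranges with known poison fail with -EIO before
 * touching the media; for reads the dcache is flushed after filling
 * the page, for writes before copying out of it.
 */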
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
			rc = -EIO;
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
	return rc;
}
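
/*
 * bio submission path: every segment is copied synchronously via
 * pmem_do_bvec(), and a write bio is only completed after wmb_pmem()
 * drains any posted writes, so bio completion implies durability on
 * platforms where arch_has_wmb_pmem() is true.
 */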
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
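
/*
 * ->rw_page() entry: synchronously read or write a single page.
 * PAGE_CACHE_SIZE equals PAGE_SIZE here, so one pmem_do_bvec() call
 * covers the whole page.
 */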
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}
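
/*
 * DAX entry point: translate a sector to a kernel virtual address plus
 * a pfn_t so callers can access the media with no intervening page
 * cache. The return value is the number of bytes available at *kaddr
 * before the end of the device.
 */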
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - offset;
}
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
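
/*
 * Allocate the pmem_device and map the namespace.
 * pmem_should_map_pages() decides whether the region needs a struct
 * page memmap (and thus devm_memremap_pages()) or whether a plain
 * devm_memremap() suffices.
 */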
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}
static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}
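
/*
 * Register the gendisk for the raw (or pfn-mode) namespace. The
 * capacity excludes data_offset, which is non-zero only when a pfn
 * info block reserves the front of the namespace for metadata.
 */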
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
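
/*
 * ->rw_bytes() backend used by the btt and pfn personalities for their
 * metadata. Reads are checked against the badblocks list at 512-byte
 * granularity, hence the sz_align rounding below.
 */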
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}
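
/*
 * Initialize (or validate) the pfn info block that reserves namespace
 * capacity for a struct page memmap. A rough sizing sketch, assuming
 * the standard 64-byte struct page: a 4 GiB namespace has
 * 4 GiB / 4 KiB = 1,048,576 pfns, so PFN_MODE_PMEM reserves about
 * SZ_8K + 64 * 1,048,576 bytes, i.e. ~64 MiB, ahead of the data area.
 */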
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(SZ_8K, nd_pfn->align);
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}
static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}
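
/*
 * Bring up a pfn-mode namespace: validate/initialize the info block,
 * remap the region with devm_memremap_pages() so the memmap lands in
 * the reserved area described by the altmap, then attach the disk with
 * data_offset pointing past the metadata.
 */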
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct nd_region *nd_region;
	struct vmem_altmap *altmap;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	phys_addr_t offset;
	int rc;
	struct vmem_altmap __altmap = {
		.base_pfn = __phys_to_pfn(nsio->res.start),
		.reserve = __phys_to_pfn(SZ_8K),
	};

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = __phys_to_pfn(offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	q = pmem->pmem_queue;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}
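
/*
 * Probe dispatch: a namespace may surface as raw pmem, btt-pmem, or
 * pfn-pmem. If a btt or pfn info block is detected the device will
 * re-probe under that personality, so the plain queue is dropped here.
 */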
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}
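
/*
 * Async event handler: on NVDIMM_REVALIDATE_POISON, refresh the
 * badblocks list. The non-btt case accounts for data_offset so the
 * recorded sectors line up with the gendisk's view of the namespace.
 */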
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev))
		nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
	else
		nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);
static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");