/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}
static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}
static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}
static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};
static bool is_namespace_pmem(struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}
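
/*
 * Format the expected disk name for a namespace: "pmem<region-id>" for
 * PMEM and IO namespaces, "ndblk<region-id>.<namespace-id>" for BLK
 * namespaces, with an "s" suffix appended when the namespace is claimed
 * by a BTT.
 */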
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = "";

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev))
		sprintf(name, "pmem%d%s", nd_region->id, suffix);
	else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix);
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
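
/*
 * __alt_name_store() and its sysfs wrapper below update the optional
 * human-readable name recorded in the namespace label.  The caller holds
 * the nvdimm_bus lock; the write is rejected while the namespace is
 * active (bound to a driver) or claimed by another device.
 */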
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input[len] = '\0';
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

 out:
	kfree(input);
	return rc;
}
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);

	return size;
}
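
/*
 * Sync the in-memory namespace configuration out to the label area of
 * each backing dimm.  A zero size with a valid uuid is treated as a
 * request to delete the allocation.
 */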
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
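
/*
 * scan_free() releases @n bytes of DPA from the resources tagged with
 * @label_id on one dimm, working from the last (highest) allocation
 * downward.  BLK allocations are trimmed from their low end so that BLK
 * space stays packed at high DPA.
 */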
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}
/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}
static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
{
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
		return true;
	if (!res || strcmp(res->name, label_id->id) == 0)
		return true;
	return false;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
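
/*
 * scan_allocate() walks the dimm's DPA resource tree looking for a gap
 * before, between, or after existing allocations (ALLOC_BEFORE,
 * ALLOC_MID, ALLOC_AFTER) that can satisfy @n bytes for @label_id,
 * either by growing an adjacent allocation or by inserting a new
 * resource, and retries until the request is met or no space remains.
 */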
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;
	int first;

 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_MID;
			}
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_AFTER;
			}
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)
				return n;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}
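
/*
 * After an allocation pass, coalesce physically contiguous BLK resources
 * that share the same label_id into a single resource.  PMEM resources
 * are never merged here, since a PMEM label covers one contiguous range.
 */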
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}
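
/*
 * To keep BLK allocations away from space that PMEM may later need, the
 * helpers below temporarily mark all unallocated PMEM-capable DPA with a
 * "pmem-reserve" label_id ahead of a BLK allocation pass, and drop the
 * reservation afterwards.
 */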
static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}
static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}
static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;

	res->start = nd_region->ndr_start;
	res->end = nd_region->ndr_start + size - 1;
}
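
/*
 * Resize a namespace: the requested byte count is split evenly across
 * all mappings in the region, then each dimm's allocation is grown or
 * shrunk to match.  The request must be a multiple of SZ_4K times the
 * number of mappings.
 */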
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	u8 *uuid = NULL;
	int rc, i;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (!uuid || nd_region->ndr_mappings == 0)
		return -ENXIO;

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_size(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
	}

	return rc;
}
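
/*
 * Example (assuming a region0/namespace0.0 layout; device names vary by
 * platform):
 *
 *   echo 0x8000000 > /sys/bus/nd/devices/region0/namespace0.0/size
 *
 * Writes land in size_store() below, which takes the device and
 * nvdimm_bus locks around __size_store() and the label update.
 */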
static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
	} else
		return -ENXIO;

	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (nd_mapping->labels)
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	u8 **ns_uuid;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
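
/* Report the base address of the namespace allocation, if it has one. */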
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);
static const unsigned long ns_lbasize_supported[] = { 512, 0 };
static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))
		return -ENXIO;

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}
static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc = 0;

	if (!is_namespace_blk(dev))
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
				ns_lbasize_supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);
static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};
static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return a->mode;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr)
		return a->mode;

	return 0;
}
static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	NULL,
};
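
/*
 * Validate that a namespace (or the namespace backing a BTT) is idle,
 * correctly claimed, and large enough before handing it to a consumer
 * driver.
 */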
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_namespace_common *ndns;
	resource_size_t size;

	if (nd_btt) {
		ndns = nd_btt->ndns;
		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(&nd_btt->dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(&nd_btt->dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (!nspm->uuid) {
			dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
			return ERR_PTR(-ENODEV);
		}
	} else if (is_namespace_blk(&ndns->dev)) {
		return ERR_PTR(-ENODEV); /* TODO */
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		bool found_uuid = false;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels) {
			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
			u16 position = __le16_to_cpu(nd_label->position);
			u16 nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (found_uuid) {
				dev_dbg(to_ndd(nd_mapping)->dev,
						"%s duplicate entry for uuid\n",
						__func__);
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found == NULL)
			break;
	}
	return found != NULL;
}
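
/*
 * Pin the validated pmem label at labels[0] for every mapping, checking
 * that each label's DPA range matches the range published for the dimm.
 */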
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	struct nd_namespace_label *select = NULL;
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels)
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		select = nd_label;
		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(select->dpa);
		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
		if (pmem_start == hw_start && pmem_end <= hw_end)
			/* pass */;
		else
			return -EINVAL;

		nd_mapping->labels[0] = select;
		nd_mapping->labels[1] = NULL;
	}

	return 0;
}
/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region);
	struct nd_namespace_label *nd_label;
	u8 select_id[NSLABEL_UUID_LEN];
	resource_size_t size = 0;
	u8 *pmem_id = NULL;
	int rc = -ENODEV, l;
	u16 i;

	if (cookie == 0)
		return -ENXIO;

	/*
	 * Find a complete set of labels by uuid.  By definition we can start
	 * with any mapping as the reference label
	 */
	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

		if (isetcookie != cookie)
			continue;

		for (i = 0; nd_region->ndr_mappings; i++)
			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
						cookie, i))
				break;
		if (i < nd_region->ndr_mappings) {
			/*
			 * Give up if we don't find an instance of a
			 * uuid at each position (from 0 to
			 * nd_region->ndr_mappings - 1), or if we find a
			 * dimm with two instances of the same uuid.
			 */
			rc = -EINVAL;
			goto err;
		} else if (pmem_id) {
			/*
			 * If there is more than one valid uuid set, we
			 * need userspace to clean this up.
			 */
			rc = -EBUSY;
			goto err;
		}
		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
		pmem_id = select_id;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, pmem_id);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *label0 = nd_mapping->labels[0];

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_size(nd_region, nspm, size);

	return 0;
 err:
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
		break;
	default:
		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
				__func__, rc);
		break;
	}
	return rc;
}
static struct device **create_namespace_pmem(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct device *dev, **devs;
	struct resource *res;
	int rc;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	rc = find_pmem_label_set(nd_region, nspm);
	if (rc == -ENODEV) {
		int i;

		/* Pass, try to permit namespace creation... */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_namespace_pmem_set_size(nd_region, nspm, 0);

		rc = 0;
	} else if (rc)
		goto err;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs)
		goto err;

	devs[0] = dev;
	return devs;

 err:
	namespace_pmem_release(&nspm->nsio.common.dev);
	return NULL;
}
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}
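
/*
 * Create an empty "seed" BLK namespace so userspace always has a device
 * available for configuring any remaining free capacity in the region.
 */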
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
}
void nd_region_create_blk_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create blk namespace\n");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char *name[NSLABEL_NAME_LEN];
		struct device **__devs;

		if (flags & NSLABEL_FLAG_LOCAL)
			/* pass */;
		else
			continue;

		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
						NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				if (!res)
					goto err;
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
						dev_name(&nsblk->common.dev));
				break;
			}
		}
		if (i < count)
			continue;

		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;
		nsblk->id = -1;
		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		if (!nsblk->uuid)
			goto err;
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
		if (name[0])
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		if (!res)
			goto err;
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	}

	return devs;

 err:
	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
	}
	kfree(devs);
	return NULL;
}
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
			return -ENOMEM;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
		}
	}

	return 0;
}
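
/*
 * Called at region probe time: read the active labels off each dimm,
 * build namespace devices for whatever configuration is found (or a
 * zero-sized seed), and register them.
 */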
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */