/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
13 #include <linux/scatterlist.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/sort.h>
/* ida allocating the unique id used in each "region%d" device name */
static DEFINE_IDA(region_ida
);
23 static void nd_region_release(struct device
*dev
)
25 struct nd_region
*nd_region
= to_nd_region(dev
);
28 for (i
= 0; i
< nd_region
->ndr_mappings
; i
++) {
29 struct nd_mapping
*nd_mapping
= &nd_region
->mapping
[i
];
30 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
32 put_device(&nvdimm
->dev
);
34 ida_simple_remove(®ion_ida
, nd_region
->id
);
38 static struct device_type nd_blk_device_type
= {
40 .release
= nd_region_release
,
43 static struct device_type nd_pmem_device_type
= {
45 .release
= nd_region_release
,
48 static struct device_type nd_volatile_device_type
= {
49 .name
= "nd_volatile",
50 .release
= nd_region_release
,
53 bool is_nd_pmem(struct device
*dev
)
55 return dev
? dev
->type
== &nd_pmem_device_type
: false;
58 bool is_nd_blk(struct device
*dev
)
60 return dev
? dev
->type
== &nd_blk_device_type
: false;
63 struct nd_region
*to_nd_region(struct device
*dev
)
65 struct nd_region
*nd_region
= container_of(dev
, struct nd_region
, dev
);
67 WARN_ON(dev
->type
->release
!= nd_region_release
);
70 EXPORT_SYMBOL_GPL(to_nd_region
);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
80 int nd_region_to_nstype(struct nd_region
*nd_region
)
82 if (is_nd_pmem(&nd_region
->dev
)) {
85 for (i
= 0, alias
= 0; i
< nd_region
->ndr_mappings
; i
++) {
86 struct nd_mapping
*nd_mapping
= &nd_region
->mapping
[i
];
87 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
89 if (nvdimm
->flags
& NDD_ALIASING
)
93 return ND_DEVICE_NAMESPACE_PMEM
;
95 return ND_DEVICE_NAMESPACE_IO
;
96 } else if (is_nd_blk(&nd_region
->dev
)) {
97 return ND_DEVICE_NAMESPACE_BLK
;
103 static ssize_t
size_show(struct device
*dev
,
104 struct device_attribute
*attr
, char *buf
)
106 struct nd_region
*nd_region
= to_nd_region(dev
);
107 unsigned long long size
= 0;
109 if (is_nd_pmem(dev
)) {
110 size
= nd_region
->ndr_size
;
111 } else if (nd_region
->ndr_mappings
== 1) {
112 struct nd_mapping
*nd_mapping
= &nd_region
->mapping
[0];
114 size
= nd_mapping
->size
;
117 return sprintf(buf
, "%llu\n", size
);
119 static DEVICE_ATTR_RO(size
);
121 static ssize_t
mappings_show(struct device
*dev
,
122 struct device_attribute
*attr
, char *buf
)
124 struct nd_region
*nd_region
= to_nd_region(dev
);
126 return sprintf(buf
, "%d\n", nd_region
->ndr_mappings
);
128 static DEVICE_ATTR_RO(mappings
);
130 static ssize_t
nstype_show(struct device
*dev
,
131 struct device_attribute
*attr
, char *buf
)
133 struct nd_region
*nd_region
= to_nd_region(dev
);
135 return sprintf(buf
, "%d\n", nd_region_to_nstype(nd_region
));
137 static DEVICE_ATTR_RO(nstype
);
139 static ssize_t
set_cookie_show(struct device
*dev
,
140 struct device_attribute
*attr
, char *buf
)
142 struct nd_region
*nd_region
= to_nd_region(dev
);
143 struct nd_interleave_set
*nd_set
= nd_region
->nd_set
;
145 if (is_nd_pmem(dev
) && nd_set
)
146 /* pass, should be precluded by region_visible */;
150 return sprintf(buf
, "%#llx\n", nd_set
->cookie
);
152 static DEVICE_ATTR_RO(set_cookie
);
154 static ssize_t
init_namespaces_show(struct device
*dev
,
155 struct device_attribute
*attr
, char *buf
)
157 struct nd_region_namespaces
*num_ns
= dev_get_drvdata(dev
);
160 nvdimm_bus_lock(dev
);
162 rc
= sprintf(buf
, "%d/%d\n", num_ns
->active
, num_ns
->count
);
165 nvdimm_bus_unlock(dev
);
169 static DEVICE_ATTR_RO(init_namespaces
);
171 static struct attribute
*nd_region_attributes
[] = {
173 &dev_attr_nstype
.attr
,
174 &dev_attr_mappings
.attr
,
175 &dev_attr_set_cookie
.attr
,
176 &dev_attr_init_namespaces
.attr
,
180 static umode_t
region_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
182 struct device
*dev
= container_of(kobj
, typeof(*dev
), kobj
);
183 struct nd_region
*nd_region
= to_nd_region(dev
);
184 struct nd_interleave_set
*nd_set
= nd_region
->nd_set
;
186 if (a
!= &dev_attr_set_cookie
.attr
)
189 if (is_nd_pmem(dev
) && nd_set
)
195 struct attribute_group nd_region_attribute_group
= {
196 .attrs
= nd_region_attributes
,
197 .is_visible
= region_visible
,
199 EXPORT_SYMBOL_GPL(nd_region_attribute_group
);
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present)
 */
205 static void nd_region_notify_driver_action(struct nvdimm_bus
*nvdimm_bus
,
206 struct device
*dev
, bool probe
)
208 if (is_nd_pmem(dev
) || is_nd_blk(dev
)) {
209 struct nd_region
*nd_region
= to_nd_region(dev
);
212 for (i
= 0; i
< nd_region
->ndr_mappings
; i
++) {
213 struct nd_mapping
*nd_mapping
= &nd_region
->mapping
[i
];
214 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
217 atomic_inc(&nvdimm
->busy
);
219 atomic_dec(&nvdimm
->busy
);
224 void nd_region_probe_success(struct nvdimm_bus
*nvdimm_bus
, struct device
*dev
)
226 nd_region_notify_driver_action(nvdimm_bus
, dev
, true);
229 void nd_region_disable(struct nvdimm_bus
*nvdimm_bus
, struct device
*dev
)
231 nd_region_notify_driver_action(nvdimm_bus
, dev
, false);
234 static ssize_t
mappingN(struct device
*dev
, char *buf
, int n
)
236 struct nd_region
*nd_region
= to_nd_region(dev
);
237 struct nd_mapping
*nd_mapping
;
238 struct nvdimm
*nvdimm
;
240 if (n
>= nd_region
->ndr_mappings
)
242 nd_mapping
= &nd_region
->mapping
[n
];
243 nvdimm
= nd_mapping
->nvdimm
;
245 return sprintf(buf
, "%s,%llu,%llu\n", dev_name(&nvdimm
->dev
),
246 nd_mapping
->start
, nd_mapping
->size
);
/* generate a read-only mapping<idx> attribute backed by mappingN() */
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
294 static umode_t
mapping_visible(struct kobject
*kobj
, struct attribute
*a
, int n
)
296 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
297 struct nd_region
*nd_region
= to_nd_region(dev
);
299 if (n
< nd_region
->ndr_mappings
)
304 static struct attribute
*mapping_attributes
[] = {
305 &dev_attr_mapping0
.attr
,
306 &dev_attr_mapping1
.attr
,
307 &dev_attr_mapping2
.attr
,
308 &dev_attr_mapping3
.attr
,
309 &dev_attr_mapping4
.attr
,
310 &dev_attr_mapping5
.attr
,
311 &dev_attr_mapping6
.attr
,
312 &dev_attr_mapping7
.attr
,
313 &dev_attr_mapping8
.attr
,
314 &dev_attr_mapping9
.attr
,
315 &dev_attr_mapping10
.attr
,
316 &dev_attr_mapping11
.attr
,
317 &dev_attr_mapping12
.attr
,
318 &dev_attr_mapping13
.attr
,
319 &dev_attr_mapping14
.attr
,
320 &dev_attr_mapping15
.attr
,
321 &dev_attr_mapping16
.attr
,
322 &dev_attr_mapping17
.attr
,
323 &dev_attr_mapping18
.attr
,
324 &dev_attr_mapping19
.attr
,
325 &dev_attr_mapping20
.attr
,
326 &dev_attr_mapping21
.attr
,
327 &dev_attr_mapping22
.attr
,
328 &dev_attr_mapping23
.attr
,
329 &dev_attr_mapping24
.attr
,
330 &dev_attr_mapping25
.attr
,
331 &dev_attr_mapping26
.attr
,
332 &dev_attr_mapping27
.attr
,
333 &dev_attr_mapping28
.attr
,
334 &dev_attr_mapping29
.attr
,
335 &dev_attr_mapping30
.attr
,
336 &dev_attr_mapping31
.attr
,
340 struct attribute_group nd_mapping_attribute_group
= {
341 .is_visible
= mapping_visible
,
342 .attrs
= mapping_attributes
,
344 EXPORT_SYMBOL_GPL(nd_mapping_attribute_group
);
346 void *nd_region_provider_data(struct nd_region
*nd_region
)
348 return nd_region
->provider_data
;
350 EXPORT_SYMBOL_GPL(nd_region_provider_data
);
352 static struct nd_region
*nd_region_create(struct nvdimm_bus
*nvdimm_bus
,
353 struct nd_region_desc
*ndr_desc
, struct device_type
*dev_type
,
356 struct nd_region
*nd_region
;
360 for (i
= 0; i
< ndr_desc
->num_mappings
; i
++) {
361 struct nd_mapping
*nd_mapping
= &ndr_desc
->nd_mapping
[i
];
362 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
364 if ((nd_mapping
->start
| nd_mapping
->size
) % SZ_4K
) {
365 dev_err(&nvdimm_bus
->dev
, "%s: %s mapping%d is not 4K aligned\n",
366 caller
, dev_name(&nvdimm
->dev
), i
);
372 nd_region
= kzalloc(sizeof(struct nd_region
)
373 + sizeof(struct nd_mapping
) * ndr_desc
->num_mappings
,
377 nd_region
->id
= ida_simple_get(®ion_ida
, 0, 0, GFP_KERNEL
);
378 if (nd_region
->id
< 0) {
383 memcpy(nd_region
->mapping
, ndr_desc
->nd_mapping
,
384 sizeof(struct nd_mapping
) * ndr_desc
->num_mappings
);
385 for (i
= 0; i
< ndr_desc
->num_mappings
; i
++) {
386 struct nd_mapping
*nd_mapping
= &ndr_desc
->nd_mapping
[i
];
387 struct nvdimm
*nvdimm
= nd_mapping
->nvdimm
;
389 get_device(&nvdimm
->dev
);
391 nd_region
->ndr_mappings
= ndr_desc
->num_mappings
;
392 nd_region
->provider_data
= ndr_desc
->provider_data
;
393 nd_region
->nd_set
= ndr_desc
->nd_set
;
394 dev
= &nd_region
->dev
;
395 dev_set_name(dev
, "region%d", nd_region
->id
);
396 dev
->parent
= &nvdimm_bus
->dev
;
397 dev
->type
= dev_type
;
398 dev
->groups
= ndr_desc
->attr_groups
;
399 nd_region
->ndr_size
= resource_size(ndr_desc
->res
);
400 nd_region
->ndr_start
= ndr_desc
->res
->start
;
401 nd_device_register(dev
);
406 struct nd_region
*nvdimm_pmem_region_create(struct nvdimm_bus
*nvdimm_bus
,
407 struct nd_region_desc
*ndr_desc
)
409 return nd_region_create(nvdimm_bus
, ndr_desc
, &nd_pmem_device_type
,
412 EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create
);
414 struct nd_region
*nvdimm_blk_region_create(struct nvdimm_bus
*nvdimm_bus
,
415 struct nd_region_desc
*ndr_desc
)
417 if (ndr_desc
->num_mappings
> 1)
419 return nd_region_create(nvdimm_bus
, ndr_desc
, &nd_blk_device_type
,
422 EXPORT_SYMBOL_GPL(nvdimm_blk_region_create
);
424 struct nd_region
*nvdimm_volatile_region_create(struct nvdimm_bus
*nvdimm_bus
,
425 struct nd_region_desc
*ndr_desc
)
427 return nd_region_create(nvdimm_bus
, ndr_desc
, &nd_volatile_device_type
,
430 EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create
);