drivers/nvdimm/bus.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;
static DEFINE_IDA(nd_ida);

static int to_nd_device_type(struct device *dev)
{
	if (is_nvdimm(dev))
		return ND_DEVICE_DIMM;
	else if (is_nd_pmem(dev))
		return ND_DEVICE_REGION_PMEM;
	else if (is_nd_blk(dev))
		return ND_DEVICE_REGION_BLK;
	else if (is_nd_dax(dev))
		return ND_DEVICE_DAX_PMEM;
	else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
		return nd_region_to_nstype(to_nd_region(dev->parent));

	return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	/*
	 * Ensure that region devices always have their numa node set as
	 * early as possible.
	 */
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		set_dev_node(dev, to_nd_region(dev)->numa_node);
	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
			to_nd_device_type(dev));
}
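
/*
 * Example of the uevent emitted above (a sketch, assuming the "nd:t%d"
 * ND_DEVICE_MODALIAS_FMT and the device-type values from
 * include/uapi/linux/ndctl.h): a pmem region device would report
 *
 *	MODALIAS=nd:t2
 *
 * which udev/modprobe then matches against a driver's type bitmask via
 * nvdimm_bus_match() further down in this file.
 */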

static struct module *to_bus_provider(struct device *dev)
{
	/* pin bus providers while regions are enabled */
	if (is_nd_pmem(dev) || is_nd_blk(dev)) {
		struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

		return nvdimm_bus->nd_desc->module;
	}
	return NULL;
}

static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	nvdimm_bus->probe_active++;
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
	nvdimm_bus_lock(&nvdimm_bus->dev);
	if (--nvdimm_bus->probe_active == 0)
		wake_up(&nvdimm_bus->probe_wait);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static int nvdimm_bus_probe(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc;

	if (!try_module_get(provider))
		return -ENXIO;

	nvdimm_bus_probe_start(nvdimm_bus);
	rc = nd_drv->probe(dev);
	if (rc == 0)
		nd_region_probe_success(nvdimm_bus, dev);
	else
		nd_region_disable(nvdimm_bus, dev);
	nvdimm_bus_probe_end(nvdimm_bus);

	dev_dbg(&nvdimm_bus->dev, "%s.probe(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);

	if (rc != 0)
		module_put(provider);
	return rc;
}

static int nvdimm_bus_remove(struct device *dev)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
	struct module *provider = to_bus_provider(dev);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	int rc = 0;

	if (nd_drv->remove)
		rc = nd_drv->remove(dev);
	nd_region_disable(nvdimm_bus, dev);

	dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
			dev_name(dev), rc);
	module_put(provider);
	return rc;
}

static void nvdimm_bus_shutdown(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nd_device_driver *nd_drv = NULL;

	if (dev->driver)
		nd_drv = to_nd_device_driver(dev->driver);

	if (nd_drv && nd_drv->shutdown) {
		nd_drv->shutdown(dev);
		dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
				dev->driver->name, dev_name(dev));
	}
}

void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
	device_lock(dev);
	if (dev->driver) {
		struct nd_device_driver *nd_drv;

		nd_drv = to_nd_device_driver(dev->driver);
		if (nd_drv->notify)
			nd_drv->notify(dev, event);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	if (!nvdimm_bus)
		return;

	/* caller is responsible for holding a reference on the device */
	nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc;
	struct nd_cmd_clear_error clear_err;
	struct nd_cmd_ars_cap ars_cap;
	u32 clear_err_unit, mask;
	int cmd_rc, rc;

	if (!nvdimm_bus)
		return -ENXIO;

	nd_desc = nvdimm_bus->nd_desc;
	/*
	 * if ndctl does not exist, it's PMEM_LEGACY and
	 * we want to just pretend everything is handled.
	 */
	if (!nd_desc->ndctl)
		return len;

	memset(&ars_cap, 0, sizeof(ars_cap));
	ars_cap.address = phys;
	ars_cap.length = len;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
			sizeof(ars_cap), &cmd_rc);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	clear_err_unit = ars_cap.clear_err_unit;
	if (!clear_err_unit || !is_power_of_2(clear_err_unit))
		return -ENXIO;

	mask = clear_err_unit - 1;
	if ((phys | len) & mask)
		return -ENXIO;
	memset(&clear_err, 0, sizeof(clear_err));
	clear_err.address = phys;
	clear_err.length = len;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
			sizeof(clear_err), &cmd_rc);
	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
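
/*
 * Worked example of the alignment check above (a sketch with made-up
 * numbers): if the bus reports clear_err_unit == 256, then mask == 0xff
 * and both @phys and @len must be multiples of 256. A request for
 * phys == 0x1000, len == 512 passes, since (0x1000 | 0x200) & 0xff == 0,
 * while phys == 0x1080 is rejected with -ENXIO because
 * 0x1080 & 0xff == 0x80.
 */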

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);

static struct bus_type nvdimm_bus_type = {
	.name = "nd",
	.uevent = nvdimm_bus_uevent,
	.match = nvdimm_bus_match,
	.probe = nvdimm_bus_probe,
	.remove = nvdimm_bus_remove,
	.shutdown = nvdimm_bus_shutdown,
};

static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}

static bool is_nvdimm_bus(struct device *dev)
{
	return dev->release == nvdimm_bus_release;
}

struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev;

	for (dev = nd_dev; dev; dev = dev->parent)
		if (is_nvdimm_bus(dev))
			break;
	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	if (dev)
		return to_nvdimm_bus(dev);
	return NULL;
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	WARN_ON(!is_nvdimm_bus(dev));
	return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
	INIT_LIST_HEAD(&nvdimm_bus->poison_list);
	init_waitqueue_head(&nvdimm_bus->probe_wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	mutex_init(&nvdimm_bus->reconfig_mutex);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.release = nvdimm_bus_release;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	nvdimm_bus->dev.bus = &nvdimm_bus_type;
	dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	rc = device_register(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	return nvdimm_bus;
 err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_register);

void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;
	device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
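
/*
 * A minimal sketch of how a bus provider is expected to use the two calls
 * above (hypothetical provider and callback names; the fields follow
 * struct nvdimm_bus_descriptor, and ->ndctl() is the provider's own
 * command handler):
 *
 *	static struct nvdimm_bus_descriptor my_nd_desc = {
 *		.provider_name	= "example-provider",
 *		.module		= THIS_MODULE,
 *		.ndctl		= my_ndctl,
 *	};
 *
 *	my_bus = nvdimm_bus_register(parent_dev, &my_nd_desc);
 *	if (!my_bus)
 *		return -ENXIO;
 *	...
 *	nvdimm_bus_unregister(my_bus);	(on teardown)
 *
 * ->module is what nvdimm_bus_probe() pins via to_bus_provider() while
 * region devices are active.
 */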

static int child_unregister(struct device *dev, void *data)
{
	/*
	 * the singular ndctl class device per bus needs to be
	 * "device_destroy"ed, so skip it here
	 *
	 * i.e. remove classless children
	 */
	if (dev->class)
		/* pass */;
	else
		nd_device_unregister(dev, ND_SYNC);
	return 0;
}

static void free_poison_list(struct list_head *poison_list)
{
	struct nd_poison *pl, *next;

	list_for_each_entry_safe(pl, next, poison_list, list) {
		list_del(&pl->list);
		kfree(pl);
	}
	list_del_init(poison_list);
}

static int nd_bus_remove(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	mutex_lock(&nvdimm_bus_list_mutex);
	list_del_init(&nvdimm_bus->list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	nd_synchronize();
	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	free_poison_list(&nvdimm_bus->poison_list);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	nvdimm_bus_destroy_ndctl(nvdimm_bus);

	return 0;
}

static int nd_bus_probe(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	int rc;

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		return rc;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	/* enable bus provider attributes to look up their local context */
	dev_set_drvdata(dev, nvdimm_bus->nd_desc);

	return 0;
}

static struct nd_device_driver nd_bus_driver = {
	.probe = nd_bus_probe,
	.remove = nd_bus_remove,
	.drv = {
		.name = "nd_bus",
		.suppress_bind_attrs = true,
		.bus = &nvdimm_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
	struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

	if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
		return true;

	return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}

static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
	async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

static void nd_async_device_register(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	if (device_add(dev) != 0) {
		dev_err(dev, "%s: failed\n", __func__);
		put_device(dev);
	}
	put_device(dev);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
	struct device *dev = d;

	/* flush bus operations before delete */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);

	device_unregister(dev);
	put_device(dev);
}

void __nd_device_register(struct device *dev)
{
	if (!dev)
		return;
	dev->bus = &nvdimm_bus_type;
	get_device(dev);
	async_schedule_domain(nd_async_device_register, dev,
			&nd_async_domain);
}

void nd_device_register(struct device *dev)
{
	device_initialize(dev);
	__nd_device_register(dev);
}
EXPORT_SYMBOL(nd_device_register);

void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
	switch (mode) {
	case ND_ASYNC:
		get_device(dev);
		async_schedule_domain(nd_async_device_unregister, dev,
				&nd_async_domain);
		break;
	case ND_SYNC:
		nd_synchronize();
		device_unregister(dev);
		break;
	}
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
		const char *mod_name)
{
	struct device_driver *drv = &nd_drv->drv;

	if (!nd_drv->type) {
		pr_debug("driver type bitmask not set (%pf)\n",
				__builtin_return_address(0));
		return -EINVAL;
	}

	if (!nd_drv->probe) {
		pr_debug("%s ->probe() must be specified\n", mod_name);
		return -EINVAL;
	}

	drv->bus = &nvdimm_bus_type;
	drv->owner = owner;
	drv->mod_name = mod_name;

	return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);
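
/*
 * A minimal sketch of the client side of __nd_driver_register() (a
 * hypothetical driver; the nd_driver_register() wrapper in <linux/nd.h>
 * supplies @owner and @mod_name):
 *
 *	static struct nd_device_driver example_driver = {
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *		.drv	= {
 *			.name = "nd_example",
 *		},
 *		.type	= ND_DRIVER_REGION_PMEM | ND_DRIVER_NAMESPACE_PMEM,
 *	};
 *
 *	rc = nd_driver_register(&example_driver);
 *
 * Per the checks above, ->type must be a non-empty bitmask of ND_DRIVER_*
 * flags and ->probe() is mandatory, otherwise registration fails with
 * -EINVAL.
 */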

int nvdimm_revalidate_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk)->parent;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const char *pol = nd_region->ro ? "only" : "write";

	if (nd_region->ro == get_disk_ro(disk))
		return 0;

	dev_info(dev, "%s read-%s, marking %s read-%s\n",
			dev_name(&nd_region->dev), pol, disk->disk_name, pol);
	set_disk_ro(disk, nd_region->ro);

	return 0;
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
			to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
	&dev_attr_modalias.attr,
	&dev_attr_devtype.attr,
	NULL,
};

/**
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
struct attribute_group nd_device_attribute_group = {
	.attrs = nd_device_attributes,
};
EXPORT_SYMBOL_GPL(nd_device_attribute_group);

static ssize_t numa_node_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *nd_numa_attributes[] = {
	&dev_attr_numa_node.attr,
	NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	if (!IS_ENABLED(CONFIG_NUMA))
		return 0;

	return a->mode;
}

/**
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
struct attribute_group nd_numa_attribute_group = {
	.attrs = nd_numa_attributes,
	.is_visible = nd_numa_attr_visible,
};
EXPORT_SYMBOL_GPL(nd_numa_attribute_group);

int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
	struct device *dev;

	dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
			"ndctl%d", nvdimm_bus->id);

	if (IS_ERR(dev))
		dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
				nvdimm_bus->id, PTR_ERR(dev));
	return PTR_ERR_OR_ZERO(dev);
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
	device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_SMART] = {
		.out_num = 2,
		.out_sizes = { 4, 128, },
	},
	[ND_CMD_SMART_THRESHOLD] = {
		.out_num = 2,
		.out_sizes = { 4, 8, },
	},
	[ND_CMD_DIMM_FLAGS] = {
		.out_num = 2,
		.out_sizes = { 4, 4 },
	},
	[ND_CMD_GET_CONFIG_SIZE] = {
		.out_num = 3,
		.out_sizes = { 4, 4, 4, },
	},
	[ND_CMD_GET_CONFIG_DATA] = {
		.in_num = 2,
		.in_sizes = { 4, 4, },
		.out_num = 2,
		.out_sizes = { 4, UINT_MAX, },
	},
	[ND_CMD_SET_CONFIG_DATA] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 1,
		.out_sizes = { 4, },
	},
	[ND_CMD_VENDOR] = {
		.in_num = 3,
		.in_sizes = { 4, 4, UINT_MAX, },
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
		return &__nd_cmd_dimm_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
	[ND_CMD_IMPLEMENTED] = { },
	[ND_CMD_ARS_CAP] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 4,
		.out_sizes = { 4, 4, 4, 4, },
	},
	[ND_CMD_ARS_START] = {
		.in_num = 5,
		.in_sizes = { 8, 8, 2, 1, 5, },
		.out_num = 2,
		.out_sizes = { 4, 4, },
	},
	[ND_CMD_ARS_STATUS] = {
		.out_num = 3,
		.out_sizes = { 4, 4, UINT_MAX, },
	},
	[ND_CMD_CLEAR_ERROR] = {
		.in_num = 2,
		.in_sizes = { 8, 8, },
		.out_num = 3,
		.out_sizes = { 4, 4, 8, },
	},
	[ND_CMD_CALL] = {
		.in_num = 2,
		.in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
		.out_num = 1,
		.out_sizes = { UINT_MAX, },
	},
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
	if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
		return &__nd_cmd_bus_descs[cmd];
	return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf)
{
	if (idx >= desc->in_num)
		return UINT_MAX;

	if (desc->in_sizes[idx] < UINT_MAX)
		return desc->in_sizes[idx];

	if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
		struct nd_cmd_set_config_hdr *hdr = buf;

		return hdr->in_length;
	} else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
		struct nd_cmd_vendor_hdr *hdr = buf;

		return hdr->in_length;
	} else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = buf;

		return pkg->nd_size_in;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);

u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field)
{
	if (idx >= desc->out_num)
		return UINT_MAX;

	if (desc->out_sizes[idx] < UINT_MAX)
		return desc->out_sizes[idx];

	if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
		return in_field[1];
	else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
		return out_field[1];
	else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
		return out_field[1] - 8;
	else if (cmd == ND_CMD_CALL) {
		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

		return pkg->nd_size_out;
	}

	return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);
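
/*
 * Worked example of the envelope sizing above, using the
 * ND_CMD_GET_CONFIG_DATA descriptor from __nd_cmd_dimm_descs: the input
 * envelope is two fixed fields (4-byte in_offset, 4-byte in_length), and
 * the output envelope is a 4-byte status followed by a variable payload
 * whose size nd_cmd_out_size() pulls from in_field[1] (the requested
 * in_length). So a read of 128 bytes of label data marshals
 * 4 + 4 = 8 input bytes and 4 + 128 = 132 output bytes, for a total
 * buf_len of 140 in __nd_ioctl() below.
 */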

void wait_nvdimm_bus_probe_idle(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	do {
		if (nvdimm_bus->probe_active == 0)
			break;
		nvdimm_bus_unlock(&nvdimm_bus->dev);
		wait_event(nvdimm_bus->probe_wait,
				nvdimm_bus->probe_active == 0);
		nvdimm_bus_lock(&nvdimm_bus->dev);
	} while (true);
}

static int pmem_active(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) && dev->driver)
		return -EBUSY;
	return 0;
}

/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	/* ask the bus provider if it would like to block this request */
	if (nd_desc->clear_to_send) {
		int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);

		if (rc)
			return rc;
	}

	/* require clear error to go through the pmem driver */
	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
		return device_for_each_child(&nvdimm_bus->dev, NULL,
				pmem_active);

	if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
		return 0;

	/* prevent label manipulation while the kernel owns label updates */
	wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
	if (atomic_read(&nvdimm->busy))
		return -EBUSY;
	return 0;
}

static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
		int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	size_t buf_len = 0, in_len = 0, out_len = 0;
	static char out_env[ND_CMD_MAX_ENVELOPE];
	static char in_env[ND_CMD_MAX_ENVELOPE];
	const struct nd_cmd_desc *desc = NULL;
	unsigned int cmd = _IOC_NR(ioctl_cmd);
	void __user *p = (void __user *) arg;
	struct device *dev = &nvdimm_bus->dev;
	struct nd_cmd_pkg pkg;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask;
	void *buf;
	int rc, i;

	if (nvdimm) {
		desc = nd_cmd_dimm_desc(cmd);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm->cmd_mask;
		dimm_name = dev_name(&nvdimm->dev);
	} else {
		desc = nd_cmd_bus_desc(cmd);
		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dimm_name = "bus";
	}

	if (cmd == ND_CMD_CALL) {
		if (copy_from_user(&pkg, p, sizeof(pkg)))
			return -EFAULT;
	}

	if (!desc || (desc->out_num + desc->in_num == 0) ||
			!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	/* fail write commands (when read-only) */
	if (read_only)
		switch (cmd) {
		case ND_CMD_VENDOR:
		case ND_CMD_SET_CONFIG_DATA:
		case ND_CMD_ARS_START:
		case ND_CMD_CLEAR_ERROR:
		case ND_CMD_CALL:
			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
					nvdimm ? nvdimm_cmd_name(cmd)
					: nvdimm_bus_cmd_name(cmd));
			return -EPERM;
		default:
			break;
		}

	/* process an input envelope */
	for (i = 0; i < desc->in_num; i++) {
		u32 in_size, copy;

		in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
		if (in_size == UINT_MAX) {
			dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			return -ENXIO;
		}
		if (in_len < sizeof(in_env))
			copy = min_t(u32, sizeof(in_env) - in_len, in_size);
		else
			copy = 0;
		if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
			return -EFAULT;
		in_len += in_size;
	}

	if (cmd == ND_CMD_CALL) {
		dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
				__func__, dimm_name, pkg.nd_command,
				in_len, out_len, buf_len);

		for (i = 0; i < ARRAY_SIZE(pkg.nd_reserved2); i++)
			if (pkg.nd_reserved2[i])
				return -EINVAL;
	}

	/* process an output envelope */
	for (i = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
				(u32 *) in_env, (u32 *) out_env);
		u32 copy;

		if (out_size == UINT_MAX) {
			dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			return -EFAULT;
		}
		if (out_len < sizeof(out_env))
			copy = min_t(u32, sizeof(out_env) - out_len, out_size);
		else
			copy = 0;
		if (copy && copy_from_user(&out_env[out_len],
					p + in_len + out_len, copy))
			return -EFAULT;
		out_len += out_size;
	}

	buf_len = out_len + in_len;
	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
		dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
				dimm_name, cmd_name, buf_len,
				ND_IOCTL_MAX_BUFLEN);
		return -EINVAL;
	}

	buf = vmalloc(buf_len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, p, buf_len)) {
		rc = -EFAULT;
		goto out;
	}

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
	if (rc)
		goto out_unlock;

	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
	if (rc < 0)
		goto out_unlock;
	if (copy_to_user(p, buf, buf_len))
		rc = -EFAULT;
 out_unlock:
	nvdimm_bus_unlock(&nvdimm_bus->dev);
 out:
	vfree(buf);
	return rc;
}

static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long id = (long) file->private_data;
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		if (nvdimm_bus->id == id) {
			rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
			break;
		}
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int match_dimm(struct device *dev, void *data)
{
	long id = (long) data;

	if (is_nvdimm(dev)) {
		struct nvdimm *nvdimm = to_nvdimm(dev);

		return nvdimm->id == id;
	}

	return 0;
}

static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int rc = -ENXIO, ro;
	struct nvdimm_bus *nvdimm_bus;

	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
	mutex_lock(&nvdimm_bus_list_mutex);
	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
		struct device *dev = device_find_child(&nvdimm_bus->dev,
				file->private_data, match_dimm);
		struct nvdimm *nvdimm;

		if (!dev)
			continue;

		nvdimm = to_nvdimm(dev);
		rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
		put_device(dev);
		break;
	}
	mutex_unlock(&nvdimm_bus_list_mutex);

	return rc;
}

static int nd_open(struct inode *inode, struct file *file)
{
	long minor = iminor(inode);

	file->private_data = (void *) minor;
	return 0;
}

static const struct file_operations nvdimm_bus_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nd_ioctl,
	.compat_ioctl = nd_ioctl,
	.llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
	.owner = THIS_MODULE,
	.open = nd_open,
	.unlocked_ioctl = nvdimm_ioctl,
	.compat_ioctl = nvdimm_ioctl,
	.llseek = noop_llseek,
};

int __init nvdimm_bus_init(void)
{
	int rc;

	BUILD_BUG_ON(sizeof(struct nd_smart_payload) != 128);
	BUILD_BUG_ON(sizeof(struct nd_smart_threshold_payload) != 8);

	rc = bus_register(&nvdimm_bus_type);
	if (rc)
		return rc;

	rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
	if (rc < 0)
		goto err_bus_chrdev;
	nvdimm_bus_major = rc;

	rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
	if (rc < 0)
		goto err_dimm_chrdev;
	nvdimm_major = rc;

	nd_class = class_create(THIS_MODULE, "nd");
	if (IS_ERR(nd_class)) {
		rc = PTR_ERR(nd_class);
		goto err_class;
	}

	rc = driver_register(&nd_bus_driver.drv);
	if (rc)
		goto err_nd_bus;

	return 0;

 err_nd_bus:
	class_destroy(nd_class);
 err_class:
	unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
	unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
	bus_unregister(&nvdimm_bus_type);

	return rc;
}

void nvdimm_bus_exit(void)
{
	driver_unregister(&nd_bus_driver.drv);
	class_destroy(nd_class);
	unregister_chrdev(nvdimm_bus_major, "ndctl");
	unregister_chrdev(nvdimm_major, "dimmctl");
	bus_unregister(&nvdimm_bus_type);
	ida_destroy(&nd_ida);
}