nfit: disable userspace initiated ars during scrub
drivers/nvdimm/bus.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fcntl.h>
#include <linux/async.h>
#include <linux/genhd.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;

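/*
 * Map a device on the nd bus to its ND_DEVICE_* type for uevent
 * modalias reporting and driver matching.  Children of pmem/blk
 * regions (i.e. namespaces) report the region's namespace type.
 */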
static int to_nd_device_type(struct device *dev)
{
        if (is_nvdimm(dev))
                return ND_DEVICE_DIMM;
        else if (is_nd_pmem(dev))
                return ND_DEVICE_REGION_PMEM;
        else if (is_nd_blk(dev))
                return ND_DEVICE_REGION_BLK;
        else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
                return nd_region_to_nstype(to_nd_region(dev->parent));

        return 0;
}

static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        /*
         * Ensure that region devices always have their numa node set as
         * early as possible.
         */
        if (is_nd_pmem(dev) || is_nd_blk(dev))
                set_dev_node(dev, to_nd_region(dev)->numa_node);
        return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
                        to_nd_device_type(dev));
}

static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(drv);

        return test_bit(to_nd_device_type(dev), &nd_drv->type);
}

static struct module *to_bus_provider(struct device *dev)
{
        /* pin bus providers while regions are enabled */
        if (is_nd_pmem(dev) || is_nd_blk(dev)) {
                struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

                return nvdimm_bus->module;
        }
        return NULL;
}

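/*
 * probe_active tracks in-flight region driver probes.  Paths that must
 * not race a probe, like userspace label updates, take the bus lock
 * and spin in wait_nvdimm_bus_probe_idle() (below) until the count
 * returns to zero.
 */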
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
        nvdimm_bus_lock(&nvdimm_bus->dev);
        nvdimm_bus->probe_active++;
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
        nvdimm_bus_lock(&nvdimm_bus->dev);
        if (--nvdimm_bus->probe_active == 0)
                wake_up(&nvdimm_bus->probe_wait);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

static int nvdimm_bus_probe(struct device *dev)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
        struct module *provider = to_bus_provider(dev);
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        int rc;

        if (!try_module_get(provider))
                return -ENXIO;

        nvdimm_bus_probe_start(nvdimm_bus);
        rc = nd_drv->probe(dev);
        if (rc == 0)
                nd_region_probe_success(nvdimm_bus, dev);
        else
                nd_region_disable(nvdimm_bus, dev);
        nvdimm_bus_probe_end(nvdimm_bus);

        dev_dbg(&nvdimm_bus->dev, "%s.probe(%s) = %d\n", dev->driver->name,
                        dev_name(dev), rc);

        if (rc != 0)
                module_put(provider);
        return rc;
}

static int nvdimm_bus_remove(struct device *dev)
{
        struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
        struct module *provider = to_bus_provider(dev);
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        int rc;

        rc = nd_drv->remove(dev);
        nd_region_disable(nvdimm_bus, dev);

        dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
                        dev_name(dev), rc);
        module_put(provider);
        return rc;
}

void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
        device_lock(dev);
        if (dev->driver) {
                struct nd_device_driver *nd_drv;

                nd_drv = to_nd_device_driver(dev->driver);
                if (nd_drv->notify)
                        nd_drv->notify(dev, event);
        }
        device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);

void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

        if (!nvdimm_bus)
                return;

        /* caller is responsible for holding a reference on the device */
        nd_device_notify(&nd_region->dev, event);
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);

static struct bus_type nvdimm_bus_type = {
        .name = "nd",
        .uevent = nvdimm_bus_uevent,
        .match = nvdimm_bus_match,
        .probe = nvdimm_bus_probe,
        .remove = nvdimm_bus_remove,
};

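/*
 * All asynchronous device registration and unregistration on the nd
 * bus is funneled through one exclusive async domain so that
 * nd_synchronize() can flush every in-flight operation with a single
 * call.
 */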
static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);

void nd_synchronize(void)
{
        async_synchronize_full_domain(&nd_async_domain);
}
EXPORT_SYMBOL_GPL(nd_synchronize);

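/*
 * Reference protocol: __nd_device_register() takes a reference to keep
 * the device alive across this async callback; it is dropped by the
 * final put_device().  On device_add() failure the initial reference
 * from device_initialize() is dropped as well, hence two puts.
 */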
static void nd_async_device_register(void *d, async_cookie_t cookie)
{
        struct device *dev = d;

        if (device_add(dev) != 0) {
                dev_err(dev, "%s: failed\n", __func__);
                put_device(dev);
        }
        put_device(dev);
}

static void nd_async_device_unregister(void *d, async_cookie_t cookie)
{
        struct device *dev = d;

        /* flush bus operations before delete */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);

        device_unregister(dev);
        put_device(dev);
}

void __nd_device_register(struct device *dev)
{
        dev->bus = &nvdimm_bus_type;
        get_device(dev);
        async_schedule_domain(nd_async_device_register, dev,
                        &nd_async_domain);
}

void nd_device_register(struct device *dev)
{
        device_initialize(dev);
        __nd_device_register(dev);
}
EXPORT_SYMBOL(nd_device_register);

void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
        switch (mode) {
        case ND_ASYNC:
                get_device(dev);
                async_schedule_domain(nd_async_device_unregister, dev,
                                &nd_async_domain);
                break;
        case ND_SYNC:
                nd_synchronize();
                device_unregister(dev);
                break;
        }
}
EXPORT_SYMBOL(nd_device_unregister);

/**
 * __nd_driver_register() - register a region or a namespace driver
 * @nd_drv: driver to register
 * @owner: automatically set by nd_driver_register() macro
 * @mod_name: automatically set by nd_driver_register() macro
 */
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
                const char *mod_name)
{
        struct device_driver *drv = &nd_drv->drv;

        if (!nd_drv->type) {
                pr_debug("driver type bitmask not set (%pf)\n",
                                __builtin_return_address(0));
                return -EINVAL;
        }

        if (!nd_drv->probe || !nd_drv->remove) {
                pr_debug("->probe() and ->remove() must be specified\n");
                return -EINVAL;
        }

        drv->bus = &nvdimm_bus_type;
        drv->owner = owner;
        drv->mod_name = mod_name;

        return driver_register(drv);
}
EXPORT_SYMBOL(__nd_driver_register);
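
/*
 * Sketch of how a driver consumes the helper above (illustrative, not
 * code from this file; names are loosely modeled on
 * drivers/nvdimm/pmem.c):
 *
 *      static struct nd_device_driver nd_pmem_driver = {
 *              .probe = nd_pmem_probe,
 *              .remove = nd_pmem_remove,
 *              .drv = {
 *                      .name = "pmem",
 *              },
 *              .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
 *      };
 *
 *      rc = nd_driver_register(&nd_pmem_driver);
 *
 * The nd_driver_register() macro supplies @owner and @mod_name, which
 * is why they are documented above as "automatically set".
 */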

int nvdimm_revalidate_disk(struct gendisk *disk)
{
        struct device *dev = disk->driverfs_dev;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        const char *pol = nd_region->ro ? "only" : "write";

        if (nd_region->ro == get_disk_ro(disk))
                return 0;

        dev_info(dev, "%s read-%s, marking %s read-%s\n",
                        dev_name(&nd_region->dev), pol, disk->disk_name, pol);
        set_disk_ro(disk, nd_region->ro);

        return 0;
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
                        to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        return sprintf(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *nd_device_attributes[] = {
        &dev_attr_modalias.attr,
        &dev_attr_devtype.attr,
        NULL,
};

/**
 * nd_device_attribute_group - generic attributes for all devices on an nd bus
 */
struct attribute_group nd_device_attribute_group = {
        .attrs = nd_device_attributes,
};
EXPORT_SYMBOL_GPL(nd_device_attribute_group);

static ssize_t numa_node_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *nd_numa_attributes[] = {
        &dev_attr_numa_node.attr,
        NULL,
};

static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
                int n)
{
        if (!IS_ENABLED(CONFIG_NUMA))
                return 0;

        return a->mode;
}

/**
 * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
 */
struct attribute_group nd_numa_attribute_group = {
        .attrs = nd_numa_attributes,
        .is_visible = nd_numa_attr_visible,
};
EXPORT_SYMBOL_GPL(nd_numa_attribute_group);

int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
        dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
        struct device *dev;

        dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
                        "ndctl%d", nvdimm_bus->id);

        if (IS_ERR(dev)) {
                dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
                                nvdimm_bus->id, PTR_ERR(dev));
                return PTR_ERR(dev);
        }
        return 0;
}

void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
{
        device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
}

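/*
 * Command envelope descriptors: the sizes, in bytes, of each input and
 * output field for the DIMM-scoped commands.  UINT_MAX marks a
 * variable-length field whose size is resolved at runtime by
 * nd_cmd_in_size() / nd_cmd_out_size().
 */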
static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
        [ND_CMD_IMPLEMENTED] = { },
        [ND_CMD_SMART] = {
                .out_num = 2,
                .out_sizes = { 4, 8, },
        },
        [ND_CMD_SMART_THRESHOLD] = {
                .out_num = 2,
                .out_sizes = { 4, 8, },
        },
        [ND_CMD_DIMM_FLAGS] = {
                .out_num = 2,
                .out_sizes = { 4, 4 },
        },
        [ND_CMD_GET_CONFIG_SIZE] = {
                .out_num = 3,
                .out_sizes = { 4, 4, 4, },
        },
        [ND_CMD_GET_CONFIG_DATA] = {
                .in_num = 2,
                .in_sizes = { 4, 4, },
                .out_num = 2,
                .out_sizes = { 4, UINT_MAX, },
        },
        [ND_CMD_SET_CONFIG_DATA] = {
                .in_num = 3,
                .in_sizes = { 4, 4, UINT_MAX, },
                .out_num = 1,
                .out_sizes = { 4, },
        },
        [ND_CMD_VENDOR] = {
                .in_num = 3,
                .in_sizes = { 4, 4, UINT_MAX, },
                .out_num = 3,
                .out_sizes = { 4, 4, UINT_MAX, },
        },
};

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
{
        if (cmd < ARRAY_SIZE(__nd_cmd_dimm_descs))
                return &__nd_cmd_dimm_descs[cmd];
        return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_dimm_desc);

static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
        [ND_CMD_IMPLEMENTED] = { },
        [ND_CMD_ARS_CAP] = {
                .in_num = 2,
                .in_sizes = { 8, 8, },
                .out_num = 4,
                .out_sizes = { 4, 4, 4, 4, },
        },
        [ND_CMD_ARS_START] = {
                .in_num = 5,
                .in_sizes = { 8, 8, 2, 1, 5, },
                .out_num = 2,
                .out_sizes = { 4, 4, },
        },
        [ND_CMD_ARS_STATUS] = {
                .out_num = 3,
                .out_sizes = { 4, 4, UINT_MAX, },
        },
};

const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
{
        if (cmd < ARRAY_SIZE(__nd_cmd_bus_descs))
                return &__nd_cmd_bus_descs[cmd];
        return NULL;
}
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);

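/*
 * Resolve the size of input field @idx for @cmd.  Fixed sizes come
 * straight from the descriptor table; for the UINT_MAX (variable
 * length) entries the size is read out of a header that has already
 * been copied in, e.g. nd_cmd_set_config_hdr.in_length.
 */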
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
                const struct nd_cmd_desc *desc, int idx, void *buf)
{
        if (idx >= desc->in_num)
                return UINT_MAX;

        if (desc->in_sizes[idx] < UINT_MAX)
                return desc->in_sizes[idx];

        if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && idx == 2) {
                struct nd_cmd_set_config_hdr *hdr = buf;

                return hdr->in_length;
        } else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) {
                struct nd_cmd_vendor_hdr *hdr = buf;

                return hdr->in_length;
        }

        return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_in_size);

u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
                const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
                const u32 *out_field)
{
        if (idx >= desc->out_num)
                return UINT_MAX;

        if (desc->out_sizes[idx] < UINT_MAX)
                return desc->out_sizes[idx];

        if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
                return in_field[1];
        else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
                return out_field[1];
        else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
                return out_field[1] - 8;

        return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);

void wait_nvdimm_bus_probe_idle(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        do {
                if (nvdimm_bus->probe_active == 0)
                        break;
                nvdimm_bus_unlock(&nvdimm_bus->dev);
                wait_event(nvdimm_bus->probe_wait,
                                nvdimm_bus->probe_active == 0);
                nvdimm_bus_lock(&nvdimm_bus->dev);
        } while (true);
}

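/*
 * Give the bus provider a chance to veto a command before it is passed
 * through.  Per this patch's subject, the nfit driver uses
 * ->clear_to_send() to reject a userspace-initiated ND_CMD_ARS_START
 * while an address range scrub is already in progress.
 */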
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
                struct nvdimm *nvdimm, unsigned int cmd)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        /* ask the bus provider if it would like to block this request */
        if (nd_desc->clear_to_send) {
                int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);

                if (rc)
                        return rc;
        }

        if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
                return 0;

        /* prevent label manipulation while the kernel owns label updates */
        wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
        if (atomic_read(&nvdimm->busy))
                return -EBUSY;
        return 0;
}

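/*
 * Common ioctl backend for the bus and dimm character devices.  The
 * fixed fields of the command envelope are staged into in_env/out_env
 * to learn the variable payload sizes, the complete buffer is bounced
 * through a vmalloc() allocation, and the command is submitted to the
 * provider's ->ndctl() callback under the bus lock.
 */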
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
                int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        size_t buf_len = 0, in_len = 0, out_len = 0;
        static char out_env[ND_CMD_MAX_ENVELOPE];
        static char in_env[ND_CMD_MAX_ENVELOPE];
        const struct nd_cmd_desc *desc = NULL;
        unsigned int cmd = _IOC_NR(ioctl_cmd);
        void __user *p = (void __user *) arg;
        struct device *dev = &nvdimm_bus->dev;
        const char *cmd_name, *dimm_name;
        unsigned long dsm_mask;
        void *buf;
        int rc, i;

        if (nvdimm) {
                desc = nd_cmd_dimm_desc(cmd);
                cmd_name = nvdimm_cmd_name(cmd);
                dsm_mask = nvdimm->dsm_mask ? *(nvdimm->dsm_mask) : 0;
                dimm_name = dev_name(&nvdimm->dev);
        } else {
                desc = nd_cmd_bus_desc(cmd);
                cmd_name = nvdimm_bus_cmd_name(cmd);
                dsm_mask = nd_desc->dsm_mask;
                dimm_name = "bus";
        }

        if (!desc || (desc->out_num + desc->in_num == 0) ||
                        !test_bit(cmd, &dsm_mask))
                return -ENOTTY;

        /* fail write commands (when read-only) */
        if (read_only)
                switch (ioctl_cmd) {
                case ND_IOCTL_VENDOR:
                case ND_IOCTL_SET_CONFIG_DATA:
                case ND_IOCTL_ARS_START:
                        dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
                                        nvdimm ? nvdimm_cmd_name(cmd)
                                        : nvdimm_bus_cmd_name(cmd));
                        return -EPERM;
                default:
                        break;
                }

        /* process an input envelope */
        for (i = 0; i < desc->in_num; i++) {
                u32 in_size, copy;

                in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
                if (in_size == UINT_MAX) {
                        dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
                                        __func__, dimm_name, cmd_name, i);
                        return -ENXIO;
                }
                if (in_len < sizeof(in_env))
                        copy = min_t(u32, sizeof(in_env) - in_len, in_size);
                else
                        copy = 0;
                if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
                        return -EFAULT;
                in_len += in_size;
        }

        /* process an output envelope */
        for (i = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
                                (u32 *) in_env, (u32 *) out_env);
                u32 copy;

                if (out_size == UINT_MAX) {
                        dev_dbg(dev, "%s:%s unknown output size cmd: %s field: %d\n",
                                        __func__, dimm_name, cmd_name, i);
                        return -EFAULT;
                }
                if (out_len < sizeof(out_env))
                        copy = min_t(u32, sizeof(out_env) - out_len, out_size);
                else
                        copy = 0;
                if (copy && copy_from_user(&out_env[out_len],
                                        p + in_len + out_len, copy))
                        return -EFAULT;
                out_len += out_size;
        }

        buf_len = out_len + in_len;
        if (buf_len > ND_IOCTL_MAX_BUFLEN) {
                dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
                                dimm_name, cmd_name, buf_len,
                                ND_IOCTL_MAX_BUFLEN);
                return -EINVAL;
        }

        buf = vmalloc(buf_len);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, p, buf_len)) {
                rc = -EFAULT;
                goto out;
        }

        nvdimm_bus_lock(&nvdimm_bus->dev);
        rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
        if (rc)
                goto out_unlock;

        rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
        if (rc < 0)
                goto out_unlock;
        if (copy_to_user(p, buf, buf_len))
                rc = -EFAULT;
 out_unlock:
        nvdimm_bus_unlock(&nvdimm_bus->dev);
 out:
        vfree(buf);
        return rc;
}

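/*
 * Sketch of the userspace side of this path (illustrative, not code
 * from this file; 'start' and 'len' are stand-in values):
 *
 *      struct nd_cmd_ars_cap cmd = {
 *              .address = start,
 *              .length = len,
 *      };
 *      int fd = open("/dev/ndctl0", O_RDWR);
 *
 *      if (fd >= 0 && ioctl(fd, ND_IOCTL_ARS_CAP, &cmd) == 0)
 *              printf("max ars_status output: %u\n", cmd.max_ars_out);
 *
 * The minor number of /dev/ndctl0 selects which registered bus handles
 * the command, per the list walk below.
 */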
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        long id = (long) file->private_data;
        int rc = -ENXIO, read_only;
        struct nvdimm_bus *nvdimm_bus;

        read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
        mutex_lock(&nvdimm_bus_list_mutex);
        list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
                if (nvdimm_bus->id == id) {
                        rc = __nd_ioctl(nvdimm_bus, NULL, read_only, cmd, arg);
                        break;
                }
        }
        mutex_unlock(&nvdimm_bus_list_mutex);

        return rc;
}

static int match_dimm(struct device *dev, void *data)
{
        long id = (long) data;

        if (is_nvdimm(dev)) {
                struct nvdimm *nvdimm = to_nvdimm(dev);

                return nvdimm->id == id;
        }

        return 0;
}

static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int rc = -ENXIO, read_only;
        struct nvdimm_bus *nvdimm_bus;

        read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
        mutex_lock(&nvdimm_bus_list_mutex);
        list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
                struct device *dev = device_find_child(&nvdimm_bus->dev,
                                file->private_data, match_dimm);
                struct nvdimm *nvdimm;

                if (!dev)
                        continue;

                nvdimm = to_nvdimm(dev);
                rc = __nd_ioctl(nvdimm_bus, nvdimm, read_only, cmd, arg);
                put_device(dev);
                break;
        }
        mutex_unlock(&nvdimm_bus_list_mutex);

        return rc;
}

static int nd_open(struct inode *inode, struct file *file)
{
        long minor = iminor(inode);

        file->private_data = (void *) minor;
        return 0;
}

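/*
 * Two command character devices hang off this file: nvdimm_bus_fops
 * backs the per-bus ndctl%d node created in nvdimm_bus_create_ndctl(),
 * and nvdimm_fops backs the per-dimm node registered under the
 * "dimmctl" major.  nd_open() stashes the minor so the ioctl handlers
 * can locate the matching bus or dimm by id.
 */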
static const struct file_operations nvdimm_bus_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
        .unlocked_ioctl = nd_ioctl,
        .compat_ioctl = nd_ioctl,
        .llseek = noop_llseek,
};

static const struct file_operations nvdimm_fops = {
        .owner = THIS_MODULE,
        .open = nd_open,
        .unlocked_ioctl = nvdimm_ioctl,
        .compat_ioctl = nvdimm_ioctl,
        .llseek = noop_llseek,
};

int __init nvdimm_bus_init(void)
{
        int rc;

        rc = bus_register(&nvdimm_bus_type);
        if (rc)
                return rc;

        rc = register_chrdev(0, "ndctl", &nvdimm_bus_fops);
        if (rc < 0)
                goto err_bus_chrdev;
        nvdimm_bus_major = rc;

        rc = register_chrdev(0, "dimmctl", &nvdimm_fops);
        if (rc < 0)
                goto err_dimm_chrdev;
        nvdimm_major = rc;

        nd_class = class_create(THIS_MODULE, "nd");
        if (IS_ERR(nd_class)) {
                rc = PTR_ERR(nd_class);
                goto err_class;
        }

        return 0;

 err_class:
        unregister_chrdev(nvdimm_major, "dimmctl");
 err_dimm_chrdev:
        unregister_chrdev(nvdimm_bus_major, "ndctl");
 err_bus_chrdev:
        bus_unregister(&nvdimm_bus_type);

        return rc;
}

void nvdimm_bus_exit(void)
{
        class_destroy(nd_class);
        unregister_chrdev(nvdimm_bus_major, "ndctl");
        unregister_chrdev(nvdimm_major, "dimmctl");
        bus_unregister(&nvdimm_bus_type);
}