/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#include "internal.h"

/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};
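
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a driver exporting directly mappable device memory would typically
 * point the opened file at this BDI from its open() method, so that the
 * capability flags above apply to subsequent mmap() calls.  The mydev_*
 * names below are hypothetical.
 *
 *	static int mydev_open(struct inode *inode, struct file *filp)
 *	{
 *		filp->f_mapping->backing_dev_info =
 *			&directly_mappable_cdev_bdi;
 *		return 0;
 *	}
 */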

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major
 * and use it.
 *
 * If major > 0 this function will attempt to reserve the passed range of
 * minors under that major.
 *
 * Returns the allocated char_device_struct on success, or ERR_PTR(-errno)
 * on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges.  */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left.  */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right.  */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
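
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a driver that owns a fixed major would reserve its numbers at init
 * time and give them back on exit.  MYDRV_MAJOR, MYDRV_MINORS and the
 * mydrv_* names are hypothetical.
 *
 *	#define MYDRV_MAJOR	42
 *	#define MYDRV_MINORS	16
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return register_chrdev_region(MKDEV(MYDRV_MAJOR, 0),
 *					      MYDRV_MINORS, "mydrv");
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		unregister_chrdev_region(MKDEV(MYDRV_MAJOR, 0), MYDRV_MINORS);
 *	}
 */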

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
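
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * most drivers let the kernel choose the major instead of hard-coding
 * one.  The mydrv_* names are hypothetical; the first allocated number
 * is returned in mydrv_devt.
 *
 *	static dev_t mydrv_devt;
 *
 *	static int __init mydrv_init(void)
 *	{
 *		int err = alloc_chrdev_region(&mydrv_devt, 0, 4, "mydrv");
 *		if (err)
 *			return err;
 *		printk(KERN_INFO "mydrv: major %d\n", MAJOR(mydrv_devt));
 *		return 0;
 *	}
 */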

/**
 * register_chrdev() - Register a major number for character devices.
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev.  It only helps to keep track of the different owners of devices.  If
 * your module only implements one type of device it's ok to use e.g. the name
 * of the module here.
 *
 * This function registers a range of 256 minor numbers.  The first minor
 * number is 0.
 */
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
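
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the legacy single-call interface.  Passing 0 requests a dynamic major,
 * which is returned on success; unregister_chrdev() (below) undoes the
 * registration.  The mydrv_* names are hypothetical.
 *
 *	static int mydrv_major;
 *
 *	static int __init mydrv_init(void)
 *	{
 *		mydrv_major = register_chrdev(0, "mydrv", &mydrv_fops);
 *		return mydrv_major < 0 ? mydrv_major : 0;
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		unregister_chrdev(mydrv_major, "mydrv");
 *	}
 */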

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
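
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the matching teardown for the alloc_chrdev_region() sketch above,
 * returning the same hypothetical mydrv_devt range of 4 minors.
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		unregister_chrdev_region(mydrv_devt, 4);
 *	}
 */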

void unregister_chrdev(unsigned int major, const char *name)
{
	struct char_device_struct *cd;
	cd = __unregister_chrdev_region(major, 0, 256);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op)
		goto out_cdev_put;

	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
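
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * once a cdev has been set up (with cdev_init() or cdev_alloc(), both
 * further down in this file), making it live is a single call.  The
 * names are hypothetical; mydrv_devt would have come from
 * alloc_chrdev_region() and the device answers for 4 minors.
 *
 *	err = cdev_add(&mydrv_cdev, mydrv_devt, 4);
 *	if (err)
 *		printk(KERN_ERR "mydrv: cdev_add failed (%d)\n", err);
 */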

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}


static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}
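
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the heap-allocated flavour.  A cdev obtained from cdev_alloc() is
 * freed by the dynamic kobject release handler once the last reference
 * is dropped after cdev_del(), so the caller never kfree()s it directly.
 * All identifiers are hypothetical.
 *
 *	static struct cdev *mydrv_cdev;
 *
 *	mydrv_cdev = cdev_alloc();
 *	if (!mydrv_cdev)
 *		return -ENOMEM;
 *	mydrv_cdev->ops = &mydrv_fops;
 *	mydrv_cdev->owner = THIS_MODULE;
 *	err = cdev_add(mydrv_cdev, mydrv_devt, 1);
 *
 *	...and on teardown:
 *	cdev_del(mydrv_cdev);
 */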

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
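
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the usual pattern for a cdev embedded in a driver-private structure;
 * the structure must stay allocated until cdev_del() has been called.
 * All mydrv_* names are hypothetical.
 *
 *	struct mydrv_device {
 *		struct cdev cdev;
 *		...
 *	};
 *
 *	cdev_init(&dev->cdev, &mydrv_fops);
 *	dev->cdev.owner = THIS_MODULE;
 *	err = cdev_add(&dev->cdev, mydrv_devt, 1);
 */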

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
	bdi_init(&directly_mappable_cdev_bdi);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);