Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
[deliverable/linux.git] / fs / char_dev.c
1 /*
2 * linux/fs/char_dev.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/config.h>
8 #include <linux/init.h>
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/devfs_fs_kernel.h>
18
19 #include <linux/kobject.h>
20 #include <linux/kobj_map.h>
21 #include <linux/cdev.h>
22 #include <linux/mutex.h>
23
24 #ifdef CONFIG_KMOD
25 #include <linux/kmod.h>
26 #endif
27
28 static struct kobj_map *cdev_map;
29
30 #define MAX_PROBE_HASH 255 /* random */
31
32 static DEFINE_MUTEX(chrdevs_lock);
33
/*
 * One registered (major, minor-range) reservation.  Entries live in the
 * chrdevs[] hash table below, chained through ->next and protected by
 * chrdevs_lock.
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next entry on this hash chain */
	unsigned int major;
	unsigned int baseminor;		/* first minor of the reserved range */
	int minorct;			/* number of minors reserved */
	char name[64];			/* owner name (shown via get_chrdev_info) */
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[MAX_PROBE_HASH];
43
44 /* index in the above */
45 static inline int major_to_index(int major)
46 {
47 return major % MAX_PROBE_HASH;
48 }
49
/* Iteration cursor handed around by the get_next_chrdev() walker below. */
struct chrdev_info {
	int index;			/* current slot in chrdevs[] */
	struct char_device_struct *cd;	/* current entry on that chain */
};
54
/*
 * Advance the chrdevs[] iteration cursor to the next registered entry.
 *
 * dev == NULL means "start": allocate a fresh cursor and position it on
 * the first entry; otherwise advance the cursor passed in.  Once the
 * table is exhausted the returned cursor has ->cd == NULL.  Expects
 * chrdevs_lock to be held by the caller (see acquire_chrdev_list()).
 * Returns NULL only if the initial cursor allocation fails.
 */
void *get_next_chrdev(void *dev)
{
	struct chrdev_info *info;

	if (dev == NULL) {
		/* first call: build the cursor */
		info = kmalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			goto out;
		info->index = 0;
		info->cd = chrdevs[info->index];
		if (info->cd)
			goto out;
	} else {
		info = dev;
	}

	while (info->index < ARRAY_SIZE(chrdevs)) {
		/* step along the current chain first */
		if (info->cd)
			info->cd = info->cd->next;
		if (info->cd)
			goto out;
		/*
		 * No devices on this chain, move to the next
		 */
		info->index++;
		info->cd = (info->index < ARRAY_SIZE(chrdevs)) ?
			chrdevs[info->index] : NULL;
		if (info->cd)
			goto out;
	}

out:
	return info;
}
89
/*
 * Begin a walk of the chrdevs[] table: take chrdevs_lock (held until
 * release_chrdev_list()) and return the initial iteration cursor.
 */
void *acquire_chrdev_list(void)
{
	mutex_lock(&chrdevs_lock);
	return get_next_chrdev(NULL);
}
95
/*
 * Finish a walk started by acquire_chrdev_list(): drop the lock and
 * free the cursor.
 */
void release_chrdev_list(void *dev)
{
	mutex_unlock(&chrdevs_lock);
	kfree(dev);
}
101
102
103 int count_chrdev_list(void)
104 {
105 struct char_device_struct *cd;
106 int i, count;
107
108 count = 0;
109
110 for (i = 0; i < ARRAY_SIZE(chrdevs) ; i++) {
111 for (cd = chrdevs[i]; cd; cd = cd->next)
112 count++;
113 }
114
115 return count;
116 }
117
118 int get_chrdev_info(void *dev, int *major, char **name)
119 {
120 struct chrdev_info *info = dev;
121
122 if (info->cd == NULL)
123 return 1;
124
125 *major = info->cd->major;
126 *name = info->cd->name;
127 return 0;
128 }
129
130 /*
131 * Register a single major with a specified minor range.
132 *
133 * If major == 0 this functions will dynamically allocate a major and return
134 * its number.
135 *
136 * If major > 0 this function will attempt to reserve the passed range of
137 * minors and will return zero on success.
138 *
139 * Returns a -ve errno on failure.
140 */
141 static struct char_device_struct *
142 __register_chrdev_region(unsigned int major, unsigned int baseminor,
143 int minorct, const char *name)
144 {
145 struct char_device_struct *cd, **cp;
146 int ret = 0;
147 int i;
148
149 cd = kmalloc(sizeof(struct char_device_struct), GFP_KERNEL);
150 if (cd == NULL)
151 return ERR_PTR(-ENOMEM);
152
153 memset(cd, 0, sizeof(struct char_device_struct));
154
155 mutex_lock(&chrdevs_lock);
156
157 /* temporary */
158 if (major == 0) {
159 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
160 if (chrdevs[i] == NULL)
161 break;
162 }
163
164 if (i == 0) {
165 ret = -EBUSY;
166 goto out;
167 }
168 major = i;
169 ret = major;
170 }
171
172 cd->major = major;
173 cd->baseminor = baseminor;
174 cd->minorct = minorct;
175 strncpy(cd->name,name, 64);
176
177 i = major_to_index(major);
178
179 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
180 if ((*cp)->major > major ||
181 ((*cp)->major == major && (*cp)->baseminor >= baseminor))
182 break;
183 if (*cp && (*cp)->major == major &&
184 (*cp)->baseminor < baseminor + minorct) {
185 ret = -EBUSY;
186 goto out;
187 }
188 cd->next = *cp;
189 *cp = cd;
190 mutex_unlock(&chrdevs_lock);
191 return cd;
192 out:
193 mutex_unlock(&chrdevs_lock);
194 kfree(cd);
195 return ERR_PTR(ret);
196 }
197
/*
 * Find and unlink the bookkeeping entry that exactly matches
 * (major, baseminor, minorct).  Returns the unlinked entry so the
 * caller can kfree() it, or NULL when no exact match exists.
 */
static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		/* unlink from the hash chain via the pointer-to-pointer */
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}
217
/*
 * register_chrdev_region() - register a range of device numbers
 * @from: first device number wanted
 * @count: number of consecutive device numbers
 * @name: owner name for the reservation
 *
 * The range may span several majors; it is split at major boundaries
 * into chunks for __register_chrdev_region().  Returns zero on success
 * or a -ve errno, in which case every chunk taken before the failure
 * is released again.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		/* chunk ends at the next major boundary, or the range end */
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* roll back the chunks registered before the failing one */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
242
243 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
244 const char *name)
245 {
246 struct char_device_struct *cd;
247 cd = __register_chrdev_region(0, baseminor, count, name);
248 if (IS_ERR(cd))
249 return PTR_ERR(cd);
250 *dev = MKDEV(cd->major, cd->baseminor);
251 return 0;
252 }
253
/*
 * Old-style registration: reserve minors 0-255 of @major, allocate a
 * cdev covering them and install @fops.  major == 0 picks a major
 * dynamically and returns it; otherwise returns 0 on success or a -ve
 * errno.
 */
int register_chrdev(unsigned int major, const char *name,
		    struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	/* '/' cannot appear in a kobject name; map each one to '!' */
	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	/* dynamic registration reports the major that was allocated */
	return major ? 0 : cd->major;
out:
	/* drop the cdev; its release handler does the freeing */
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
289
290 void unregister_chrdev_region(dev_t from, unsigned count)
291 {
292 dev_t to = from + count;
293 dev_t n, next;
294
295 for (n = from; n < to; n = next) {
296 next = MKDEV(MAJOR(n)+1, 0);
297 if (next > to)
298 next = to;
299 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
300 }
301 }
302
303 int unregister_chrdev(unsigned int major, const char *name)
304 {
305 struct char_device_struct *cd;
306 cd = __unregister_chrdev_region(major, 0, 256);
307 if (cd && cd->cdev)
308 cdev_del(cd->cdev);
309 kfree(cd);
310 return 0;
311 }
312
313 static DEFINE_SPINLOCK(cdev_lock);
314
/*
 * Pin a cdev for use: take a reference on the owning module first, then
 * on the embedded kobject.  Returns the kobject on success, NULL on
 * failure (module gone or kobject dead).
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);	/* undo the module ref on failure */
	return kobj;
}
327
328 void cdev_put(struct cdev *p)
329 {
330 if (p) {
331 struct module *owner = p->owner;
332 kobject_put(&p->kobj);
333 module_put(owner);
334 }
335 }
336
337 /*
338 * Called every time a character special file is opened
339 */
int chrdev_open(struct inode * inode, struct file * filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/*
		 * Drop the spinlock for the lookup: kobj_lookup() can end
		 * up in base_probe() -> request_module(), which may sleep.
		 */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* re-check: another opener may have cached a cdev meanwhile */
		p = inode->i_cdev;
		if (!p) {
			/* we won the race: cache our lookup on the inode */
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;	/* reference now owned by the inode */
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);	/* drop the lookup ref if we lost the race */
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		/* legacy char drivers still expect the BKL around ->open() */
		lock_kernel();
		ret = filp->f_op->open(inode,filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);	/* driver refused the open: drop our ref */
	return ret;
}
385
/*
 * Detach an inode from the cdev it was cached on by chrdev_open(),
 * clearing i_cdev and unhooking it from the cdev's inode list.
 */
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}
393
/*
 * Break the link between a dying cdev and every inode still caching it,
 * so no inode is left pointing at freed memory after release.
 */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
405
406 /*
407 * Dummy default file-operations: the only thing this does
408 * is contain the open that then fills in the correct operations
409 * depending on the special file...
410 */
struct file_operations def_chr_fops = {
	.open = chrdev_open,	/* swapped for the device's own fops on open */
};
414
415 static struct kobject *exact_match(dev_t dev, int *part, void *data)
416 {
417 struct cdev *p = data;
418 return &p->kobj;
419 }
420
421 static int exact_lock(dev_t dev, void *data)
422 {
423 struct cdev *p = data;
424 return cdev_get(p) ? 0 : -1;
425 }
426
/*
 * cdev_add() - make a cdev live
 * @p: the cdev to activate
 * @dev: first device number it answers to
 * @count: number of consecutive device numbers it covers
 *
 * Maps [dev, dev+count) onto @p in cdev_map so chrdev_open() can find
 * it.  Returns the kobj_map() result (0 on success).
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
433
/* Remove the [dev, dev+count) range from the char device map. */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
438
/*
 * cdev_del() - remove a cdev from the system
 *
 * Unmaps its device-number range and drops the kobject reference; the
 * ktype release handler performs the final cleanup.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
444
445
/*
 * kobject release for caller-owned cdevs (set up via cdev_init): only
 * purge inode links, the storage belongs to the caller.
 */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}
451
/*
 * kobject release for cdevs obtained from cdev_alloc(): purge inode
 * links and free the structure itself.
 */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}
458
/* ktype for cdevs embedded in driver structures (cdev_init) */
static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

/* ktype for cdevs allocated by cdev_alloc(); release also kfree()s */
static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
466
467 struct cdev *cdev_alloc(void)
468 {
469 struct cdev *p = kmalloc(sizeof(struct cdev), GFP_KERNEL);
470 if (p) {
471 memset(p, 0, sizeof(struct cdev));
472 p->kobj.ktype = &ktype_cdev_dynamic;
473 INIT_LIST_HEAD(&p->list);
474 kobject_init(&p->kobj);
475 }
476 return p;
477 }
478
/*
 * cdev_init() - set up a caller-allocated cdev for cdev_add()
 * @cdev: storage owned by the caller (not freed by the release handler)
 * @fops: file operations for this device
 */
void cdev_init(struct cdev *cdev, struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	cdev->kobj.ktype = &ktype_cdev_default;
	kobject_init(&cdev->kobj);
	cdev->ops = fops;
}
487
/*
 * Fallback probe for device numbers with no registered cdev: try to
 * load a driver module via its device-number alias.  Always returns
 * NULL; a successful modprobe re-populates the map for the retry.
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
495
/* Boot-time setup: create the dev_t -> cdev map used by chrdev_open(). */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
500
501
502 /* Let modules do char dev stuff */
503 EXPORT_SYMBOL(register_chrdev_region);
504 EXPORT_SYMBOL(unregister_chrdev_region);
505 EXPORT_SYMBOL(alloc_chrdev_region);
506 EXPORT_SYMBOL(cdev_init);
507 EXPORT_SYMBOL(cdev_alloc);
508 EXPORT_SYMBOL(cdev_del);
509 EXPORT_SYMBOL(cdev_add);
510 EXPORT_SYMBOL(register_chrdev);
511 EXPORT_SYMBOL(unregister_chrdev);
This page took 0.042695 seconds and 6 git commands to generate.