drivers/mtd/mtdchar.c
1 /*
2 * Character-device access to raw MTD devices.
3 *
4 */
5
6 #include <linux/device.h>
7 #include <linux/fs.h>
8 #include <linux/mm.h>
9 #include <linux/err.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/sched.h>
15 #include <linux/smp_lock.h>
16 #include <linux/backing-dev.h>
17 #include <linux/compat.h>
18 #include <linux/mount.h>
19
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/compatmac.h>
22
23 #include <asm/uaccess.h>
24
25 #define MTD_INODE_FS_MAGIC 0x11307854
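/*
 * Internal pseudo filesystem ("mtd_inodefs", mounted in init_mtdchar below):
 * it supplies one inode per MTD device so that the character device's
 * f_mapping carries the device's backing_dev_info (set up in mtd_open).
 */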
26 static struct vfsmount *mtd_inode_mnt __read_mostly;
27
28 /*
29 * Data structure to hold the pointer to the mtd device as well
30 * as mode information for various use cases.
31 */
32 struct mtd_file_info {
33 struct mtd_info *mtd;
34 struct inode *ino;
35 enum mtd_file_modes mode;
36 };
37
38 static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
39 {
40 struct mtd_file_info *mfi = file->private_data;
41 struct mtd_info *mtd = mfi->mtd;
42
43 switch (orig) {
44 case SEEK_SET:
45 break;
46 case SEEK_CUR:
47 offset += file->f_pos;
48 break;
49 case SEEK_END:
50 offset += mtd->size;
51 break;
52 default:
53 return -EINVAL;
54 }
55
56 if (offset >= 0 && offset <= mtd->size)
57 return file->f_pos = offset;
58
59 return -EINVAL;
60 }
61
62
63
64 static int mtd_open(struct inode *inode, struct file *file)
65 {
66 int minor = iminor(inode);
67 int devnum = minor >> 1;
68 int ret = 0;
69 struct mtd_info *mtd;
70 struct mtd_file_info *mfi;
71 struct inode *mtd_ino;
72
73 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
74
75 /* You can't open the RO devices RW */
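/* (each MTD device owns two minors: the even minor is the read-write node, the odd one is read-only) */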
76 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
77 return -EACCES;
78
79 lock_kernel();
80 mtd = get_mtd_device(NULL, devnum);
81
82 if (IS_ERR(mtd)) {
83 ret = PTR_ERR(mtd);
84 goto out;
85 }
86
87 if (mtd->type == MTD_ABSENT) {
88 put_mtd_device(mtd);
89 ret = -ENODEV;
90 goto out;
91 }
92
93 mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
94 if (!mtd_ino) {
95 put_mtd_device(mtd);
96 ret = -ENOMEM;
97 goto out;
98 }
99 if (mtd_ino->i_state & I_NEW) {
100 mtd_ino->i_private = mtd;
101 mtd_ino->i_mode = S_IFCHR;
102 mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
103 unlock_new_inode(mtd_ino);
104 }
105 file->f_mapping = mtd_ino->i_mapping;
106
107 /* You can't open it RW if it's not a writeable device */
108 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
109 iput(mtd_ino);
110 put_mtd_device(mtd);
111 ret = -EACCES;
112 goto out;
113 }
114
115 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
116 if (!mfi) {
117 iput(mtd_ino);
118 put_mtd_device(mtd);
119 ret = -ENOMEM;
120 goto out;
121 }
122 mfi->ino = mtd_ino;
123 mfi->mtd = mtd;
124 file->private_data = mfi;
125
126 out:
127 unlock_kernel();
128 return ret;
129 } /* mtd_open */
130
131 /*====================================================================*/
132
133 static int mtd_close(struct inode *inode, struct file *file)
134 {
135 struct mtd_file_info *mfi = file->private_data;
136 struct mtd_info *mtd = mfi->mtd;
137
138 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
139
140 /* Only sync if opened RW */
141 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
142 mtd->sync(mtd);
143
144 iput(mfi->ino);
145
146 put_mtd_device(mtd);
147 file->private_data = NULL;
148 kfree(mfi);
149
150 return 0;
151 } /* mtd_close */
152
153 /* FIXME: This _really_ needs to die. In 2.5, we should lock the
154 userspace buffer down and use it directly with readv/writev.
155 */
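/*
 * Reads and writes are staged through a kernel bounce buffer of at most
 * MAX_KMALLOC_SIZE (128 KiB) and handled in chunks of that size.
 */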
156 #define MAX_KMALLOC_SIZE 0x20000
157
158 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
159 {
160 struct mtd_file_info *mfi = file->private_data;
161 struct mtd_info *mtd = mfi->mtd;
162 size_t retlen=0;
163 size_t total_retlen=0;
164 int ret=0;
165 int len;
166 char *kbuf;
167
168 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
169
170 if (*ppos + count > mtd->size)
171 count = mtd->size - *ppos;
172
173 if (!count)
174 return 0;
175
176 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
177 and pass them directly to the MTD functions */
178
179 if (count > MAX_KMALLOC_SIZE)
180 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
181 else
182 kbuf=kmalloc(count, GFP_KERNEL);
183
184 if (!kbuf)
185 return -ENOMEM;
186
187 while (count) {
188
189 if (count > MAX_KMALLOC_SIZE)
190 len = MAX_KMALLOC_SIZE;
191 else
192 len = count;
193
194 switch (mfi->mode) {
195 case MTD_MODE_OTP_FACTORY:
196 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
197 break;
198 case MTD_MODE_OTP_USER:
199 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
200 break;
201 case MTD_MODE_RAW:
202 {
203 struct mtd_oob_ops ops;
204
205 ops.mode = MTD_OOB_RAW;
206 ops.datbuf = kbuf;
207 ops.oobbuf = NULL;
208 ops.len = len;
209
210 ret = mtd->read_oob(mtd, *ppos, &ops);
211 retlen = ops.retlen;
212 break;
213 }
214 default:
215 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
216 }
217 /* NAND returns -EBADMSG on ECC errors, but it still returns
218 * the data. For our userspace tools it is important
219 * to dump areas with ECC errors!
220 * For kernel-internal usage it might also return -EUCLEAN
221 * to signal the caller that a bitflip has occurred and has
222 * been corrected by the ECC algorithm.
223 * Userspace software which accesses NAND this way
224 * must be aware that it is dealing with NAND.
225 */
226 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
227 *ppos += retlen;
228 if (copy_to_user(buf, kbuf, retlen)) {
229 kfree(kbuf);
230 return -EFAULT;
231 }
232 else
233 total_retlen += retlen;
234
235 count -= retlen;
236 buf += retlen;
237 if (retlen == 0)
238 count = 0;
239 }
240 else {
241 kfree(kbuf);
242 return ret;
243 }
244
245 }
246
247 kfree(kbuf);
248 return total_retlen;
249 } /* mtd_read */
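/*
 * Illustrative userspace sketch (not part of this driver; the device node
 * name, buffer size and the usual <fcntl.h>/<unistd.h> includes are
 * assumptions): reading the start of an MTD device through this char device.
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	unsigned char buf[4096];
 *	ssize_t n;
 *
 *	n = pread(fd, buf, sizeof(buf), 0);
 *	close(fd);
 */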
250
251 static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
252 {
253 struct mtd_file_info *mfi = file->private_data;
254 struct mtd_info *mtd = mfi->mtd;
255 char *kbuf;
256 size_t retlen;
257 size_t total_retlen=0;
258 int ret=0;
259 int len;
260
261 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
262
263 if (*ppos == mtd->size)
264 return -ENOSPC;
265
266 if (*ppos + count > mtd->size)
267 count = mtd->size - *ppos;
268
269 if (!count)
270 return 0;
271
272 if (count > MAX_KMALLOC_SIZE)
273 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
274 else
275 kbuf=kmalloc(count, GFP_KERNEL);
276
277 if (!kbuf)
278 return -ENOMEM;
279
280 while (count) {
281
282 if (count > MAX_KMALLOC_SIZE)
283 len = MAX_KMALLOC_SIZE;
284 else
285 len = count;
286
287 if (copy_from_user(kbuf, buf, len)) {
288 kfree(kbuf);
289 return -EFAULT;
290 }
291
292 switch (mfi->mode) {
293 case MTD_MODE_OTP_FACTORY:
294 ret = -EROFS;
295 break;
296 case MTD_MODE_OTP_USER:
297 if (!mtd->write_user_prot_reg) {
298 ret = -EOPNOTSUPP;
299 break;
300 }
301 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
302 break;
303
304 case MTD_MODE_RAW:
305 {
306 struct mtd_oob_ops ops;
307
308 ops.mode = MTD_OOB_RAW;
309 ops.datbuf = kbuf;
310 ops.oobbuf = NULL;
311 ops.len = len;
312
313 ret = mtd->write_oob(mtd, *ppos, &ops);
314 retlen = ops.retlen;
315 break;
316 }
317
318 default:
319 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
320 }
321 if (!ret) {
322 *ppos += retlen;
323 total_retlen += retlen;
324 count -= retlen;
325 buf += retlen;
326 }
327 else {
328 kfree(kbuf);
329 return ret;
330 }
331 }
332
333 kfree(kbuf);
334 return total_retlen;
335 } /* mtd_write */
336
337 /*======================================================================
338
339 IOCTL calls for getting device parameters.
340
341 ======================================================================*/
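/*
 * Illustrative userspace sketch (not part of this driver; assumes
 * <sys/ioctl.h>, <mtd/mtd-user.h> and an existing /dev/mtd0): query the
 * device geometry with MEMGETINFO and erase the first erase block with
 * MEMERASE.
 *
 *	struct mtd_info_user info;
 *	struct erase_info_user ei;
 *	int fd = open("/dev/mtd0", O_RDWR);
 *
 *	if (ioctl(fd, MEMGETINFO, &info) == 0) {
 *		ei.start = 0;
 *		ei.length = info.erasesize;
 *		ioctl(fd, MEMERASE, &ei);
 *	}
 *	close(fd);
 */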
342 static void mtdchar_erase_callback (struct erase_info *instr)
343 {
344 wake_up((wait_queue_head_t *)instr->priv);
345 }
346
347 #ifdef CONFIG_HAVE_MTD_OTP
348 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
349 {
350 struct mtd_info *mtd = mfi->mtd;
351 int ret = 0;
352
353 switch (mode) {
354 case MTD_OTP_FACTORY:
355 if (!mtd->read_fact_prot_reg)
356 ret = -EOPNOTSUPP;
357 else
358 mfi->mode = MTD_MODE_OTP_FACTORY;
359 break;
360 case MTD_OTP_USER:
361 if (!mtd->read_fact_prot_reg)
362 ret = -EOPNOTSUPP;
363 else
364 mfi->mode = MTD_MODE_OTP_USER;
365 break;
366 default:
367 ret = -EINVAL;
368 case MTD_OTP_OFF:
369 break;
370 }
371 return ret;
372 }
373 #else
374 # define otp_select_filemode(f,m) -EOPNOTSUPP
375 #endif
376
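/*
 * OOB writes from userspace: the low bits of the start address (modulo
 * oobsize) pick the byte offset inside the OOB area, the remaining bits pick
 * the page; at most 4 KiB of OOB data is accepted per call, and it is copied
 * into a kernel buffer before being handed to mtd->write_oob().
 */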
377 static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
378 uint64_t start, uint32_t length, void __user *ptr,
379 uint32_t __user *retp)
380 {
381 struct mtd_oob_ops ops;
382 uint32_t retlen;
383 int ret = 0;
384
385 if (!(file->f_mode & FMODE_WRITE))
386 return -EPERM;
387
388 if (length > 4096)
389 return -EINVAL;
390
391 if (!mtd->write_oob)
392 ret = -EOPNOTSUPP;
393 else
394 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
395
396 if (ret)
397 return ret;
398
399 ops.ooblen = length;
400 ops.ooboffs = start & (mtd->oobsize - 1);
401 ops.datbuf = NULL;
402 ops.mode = MTD_OOB_PLACE;
403
404 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
405 return -EINVAL;
406
407 ops.oobbuf = memdup_user(ptr, length);
408 if (IS_ERR(ops.oobbuf))
409 return PTR_ERR(ops.oobbuf);
410
411 start &= ~((uint64_t)mtd->oobsize - 1);
412 ret = mtd->write_oob(mtd, start, &ops);
413
414 if (ops.oobretlen > 0xFFFFFFFFU)
415 ret = -EOVERFLOW;
416 retlen = ops.oobretlen;
417 if (copy_to_user(retp, &retlen, sizeof(length)))
418 ret = -EFAULT;
419
420 kfree(ops.oobbuf);
421 return ret;
422 }
423
424 static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
425 uint32_t length, void __user *ptr, uint32_t __user *retp)
426 {
427 struct mtd_oob_ops ops;
428 int ret = 0;
429
430 if (length > 4096)
431 return -EINVAL;
432
433 if (!mtd->read_oob)
434 ret = -EOPNOTSUPP;
435 else
436 ret = access_ok(VERIFY_WRITE, ptr,
437 length) ? 0 : -EFAULT;
438 if (ret)
439 return ret;
440
441 ops.ooblen = length;
442 ops.ooboffs = start & (mtd->oobsize - 1);
443 ops.datbuf = NULL;
444 ops.mode = MTD_OOB_PLACE;
445
446 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
447 return -EINVAL;
448
449 ops.oobbuf = kmalloc(length, GFP_KERNEL);
450 if (!ops.oobbuf)
451 return -ENOMEM;
452
453 start &= ~((uint64_t)mtd->oobsize - 1);
454 ret = mtd->read_oob(mtd, start, &ops);
455
456 if (put_user(ops.oobretlen, retp))
457 ret = -EFAULT;
458 else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
459 ops.oobretlen))
460 ret = -EFAULT;
461
462 kfree(ops.oobbuf);
463 return ret;
464 }
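/*
 * Illustrative userspace sketch (assumptions as in the MEMERASE example
 * above; additionally assumes a NAND device with a 64-byte OOB area): read
 * the OOB bytes of the first page with MEMREADOOB.
 *
 *	unsigned char oob[64];
 *	struct mtd_oob_buf oobbuf = { .start = 0, .length = sizeof(oob), .ptr = oob };
 *
 *	ioctl(fd, MEMREADOOB, &oobbuf);
 */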
465
466 static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
467 {
468 struct mtd_file_info *mfi = file->private_data;
469 struct mtd_info *mtd = mfi->mtd;
470 void __user *argp = (void __user *)arg;
471 int ret = 0;
472 u_long size;
473 struct mtd_info_user info;
474
475 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
476
477 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
478 if (cmd & IOC_IN) {
479 if (!access_ok(VERIFY_READ, argp, size))
480 return -EFAULT;
481 }
482 if (cmd & IOC_OUT) {
483 if (!access_ok(VERIFY_WRITE, argp, size))
484 return -EFAULT;
485 }
486
487 switch (cmd) {
488 case MEMGETREGIONCOUNT:
489 if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
490 return -EFAULT;
491 break;
492
493 case MEMGETREGIONINFO:
494 {
495 uint32_t ur_idx;
496 struct mtd_erase_region_info *kr;
497 struct region_info_user __user *ur = argp;
498
499 if (get_user(ur_idx, &(ur->regionindex)))
500 return -EFAULT;
501
502 kr = &(mtd->eraseregions[ur_idx]);
503
504 if (put_user(kr->offset, &(ur->offset))
505 || put_user(kr->erasesize, &(ur->erasesize))
506 || put_user(kr->numblocks, &(ur->numblocks)))
507 return -EFAULT;
508
509 break;
510 }
511
512 case MEMGETINFO:
513 info.type = mtd->type;
514 info.flags = mtd->flags;
515 info.size = mtd->size;
516 info.erasesize = mtd->erasesize;
517 info.writesize = mtd->writesize;
518 info.oobsize = mtd->oobsize;
519 /* The below fields are obsolete */
520 info.ecctype = -1;
521 info.eccsize = 0;
522 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
523 return -EFAULT;
524 break;
525
526 case MEMERASE:
527 case MEMERASE64:
528 {
529 struct erase_info *erase;
530
531 if(!(file->f_mode & FMODE_WRITE))
532 return -EPERM;
533
534 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
535 if (!erase)
536 ret = -ENOMEM;
537 else {
538 wait_queue_head_t waitq;
539 DECLARE_WAITQUEUE(wait, current);
540
541 init_waitqueue_head(&waitq);
542
543 if (cmd == MEMERASE64) {
544 struct erase_info_user64 einfo64;
545
546 if (copy_from_user(&einfo64, argp,
547 sizeof(struct erase_info_user64))) {
548 kfree(erase);
549 return -EFAULT;
550 }
551 erase->addr = einfo64.start;
552 erase->len = einfo64.length;
553 } else {
554 struct erase_info_user einfo32;
555
556 if (copy_from_user(&einfo32, argp,
557 sizeof(struct erase_info_user))) {
558 kfree(erase);
559 return -EFAULT;
560 }
561 erase->addr = einfo32.start;
562 erase->len = einfo32.length;
563 }
564 erase->mtd = mtd;
565 erase->callback = mtdchar_erase_callback;
566 erase->priv = (unsigned long)&waitq;
567
568 /*
569 FIXME: Allow INTERRUPTIBLE. Which means
570 not having the wait_queue head on the stack.
571
572 If the wq_head is on the stack, and we
573 leave because we got interrupted, then the
574 wq_head is no longer there when the
575 callback routine tries to wake us up.
576 */
577 ret = mtd->erase(mtd, erase);
578 if (!ret) {
579 set_current_state(TASK_UNINTERRUPTIBLE);
580 add_wait_queue(&waitq, &wait);
581 if (erase->state != MTD_ERASE_DONE &&
582 erase->state != MTD_ERASE_FAILED)
583 schedule();
584 remove_wait_queue(&waitq, &wait);
585 set_current_state(TASK_RUNNING);
586
587 ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
588 }
589 kfree(erase);
590 }
591 break;
592 }
593
594 case MEMWRITEOOB:
595 {
596 struct mtd_oob_buf buf;
597 struct mtd_oob_buf __user *buf_user = argp;
598
599 /* NOTE: writes return length to buf_user->length */
600 if (copy_from_user(&buf, argp, sizeof(buf)))
601 ret = -EFAULT;
602 else
603 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
604 buf.ptr, &buf_user->length);
605 break;
606 }
607
608 case MEMREADOOB:
609 {
610 struct mtd_oob_buf buf;
611 struct mtd_oob_buf __user *buf_user = argp;
612
613 /* NOTE: writes return length to buf_user->start */
614 if (copy_from_user(&buf, argp, sizeof(buf)))
615 ret = -EFAULT;
616 else
617 ret = mtd_do_readoob(mtd, buf.start, buf.length,
618 buf.ptr, &buf_user->start);
619 break;
620 }
621
622 case MEMWRITEOOB64:
623 {
624 struct mtd_oob_buf64 buf;
625 struct mtd_oob_buf64 __user *buf_user = argp;
626
627 if (copy_from_user(&buf, argp, sizeof(buf)))
628 ret = -EFAULT;
629 else
630 ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
631 (void __user *)(uintptr_t)buf.usr_ptr,
632 &buf_user->length);
633 break;
634 }
635
636 case MEMREADOOB64:
637 {
638 struct mtd_oob_buf64 buf;
639 struct mtd_oob_buf64 __user *buf_user = argp;
640
641 if (copy_from_user(&buf, argp, sizeof(buf)))
642 ret = -EFAULT;
643 else
644 ret = mtd_do_readoob(mtd, buf.start, buf.length,
645 (void __user *)(uintptr_t)buf.usr_ptr,
646 &buf_user->length);
647 break;
648 }
649
650 case MEMLOCK:
651 {
652 struct erase_info_user einfo;
653
654 if (copy_from_user(&einfo, argp, sizeof(einfo)))
655 return -EFAULT;
656
657 if (!mtd->lock)
658 ret = -EOPNOTSUPP;
659 else
660 ret = mtd->lock(mtd, einfo.start, einfo.length);
661 break;
662 }
663
664 case MEMUNLOCK:
665 {
666 struct erase_info_user einfo;
667
668 if (copy_from_user(&einfo, argp, sizeof(einfo)))
669 return -EFAULT;
670
671 if (!mtd->unlock)
672 ret = -EOPNOTSUPP;
673 else
674 ret = mtd->unlock(mtd, einfo.start, einfo.length);
675 break;
676 }
677
678 /* Legacy interface */
679 case MEMGETOOBSEL:
680 {
681 struct nand_oobinfo oi;
682
683 if (!mtd->ecclayout)
684 return -EOPNOTSUPP;
685 if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
686 return -EINVAL;
687
688 oi.useecc = MTD_NANDECC_AUTOPLACE;
689 memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
690 memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
691 sizeof(oi.oobfree));
692 oi.eccbytes = mtd->ecclayout->eccbytes;
693
694 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
695 return -EFAULT;
696 break;
697 }
698
699 case MEMGETBADBLOCK:
700 {
701 loff_t offs;
702
703 if (copy_from_user(&offs, argp, sizeof(loff_t)))
704 return -EFAULT;
705 if (!mtd->block_isbad)
706 ret = -EOPNOTSUPP;
707 else
708 return mtd->block_isbad(mtd, offs);
709 break;
710 }
711
712 case MEMSETBADBLOCK:
713 {
714 loff_t offs;
715
716 if (copy_from_user(&offs, argp, sizeof(loff_t)))
717 return -EFAULT;
718 if (!mtd->block_markbad)
719 ret = -EOPNOTSUPP;
720 else
721 return mtd->block_markbad(mtd, offs);
722 break;
723 }
724
725 #ifdef CONFIG_HAVE_MTD_OTP
726 case OTPSELECT:
727 {
728 int mode;
729 if (copy_from_user(&mode, argp, sizeof(int)))
730 return -EFAULT;
731
732 mfi->mode = MTD_MODE_NORMAL;
733
734 ret = otp_select_filemode(mfi, mode);
735
736 file->f_pos = 0;
737 break;
738 }
739
740 case OTPGETREGIONCOUNT:
741 case OTPGETREGIONINFO:
742 {
743 struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
744 if (!buf)
745 return -ENOMEM;
746 ret = -EOPNOTSUPP;
747 switch (mfi->mode) {
748 case MTD_MODE_OTP_FACTORY:
749 if (mtd->get_fact_prot_info)
750 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
751 break;
752 case MTD_MODE_OTP_USER:
753 if (mtd->get_user_prot_info)
754 ret = mtd->get_user_prot_info(mtd, buf, 4096);
755 break;
756 default:
757 break;
758 }
759 if (ret >= 0) {
760 if (cmd == OTPGETREGIONCOUNT) {
761 int nbr = ret / sizeof(struct otp_info);
762 ret = copy_to_user(argp, &nbr, sizeof(int));
763 } else
764 ret = copy_to_user(argp, buf, ret);
765 if (ret)
766 ret = -EFAULT;
767 }
768 kfree(buf);
769 break;
770 }
771
772 case OTPLOCK:
773 {
774 struct otp_info oinfo;
775
776 if (mfi->mode != MTD_MODE_OTP_USER)
777 return -EINVAL;
778 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
779 return -EFAULT;
780 if (!mtd->lock_user_prot_reg)
781 return -EOPNOTSUPP;
782 ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
783 break;
784 }
785 #endif
786
787 case ECCGETLAYOUT:
788 {
789 if (!mtd->ecclayout)
790 return -EOPNOTSUPP;
791
792 if (copy_to_user(argp, mtd->ecclayout,
793 sizeof(struct nand_ecclayout)))
794 return -EFAULT;
795 break;
796 }
797
798 case ECCGETSTATS:
799 {
800 if (copy_to_user(argp, &mtd->ecc_stats,
801 sizeof(struct mtd_ecc_stats)))
802 return -EFAULT;
803 break;
804 }
805
806 case MTDFILEMODE:
807 {
808 mfi->mode = 0;
809
810 switch(arg) {
811 case MTD_MODE_OTP_FACTORY:
812 case MTD_MODE_OTP_USER:
813 ret = otp_select_filemode(mfi, arg);
814 break;
815
816 case MTD_MODE_RAW:
817 if (!mtd->read_oob || !mtd->write_oob)
818 return -EOPNOTSUPP;
819 mfi->mode = arg;
820
821 case MTD_MODE_NORMAL:
822 break;
823 default:
824 ret = -EINVAL;
825 }
826 file->f_pos = 0;
827 break;
828 }
829
830 default:
831 ret = -ENOTTY;
832 }
833
834 return ret;
835 } /* memory_ioctl */
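/*
 * Illustrative userspace sketch (assumptions as in the earlier examples):
 * put the open file into raw mode so subsequent reads and writes bypass ECC
 * (see the MTD_MODE_RAW branches in mtd_read/mtd_write above), read one page,
 * then switch back. MTDFILEMODE takes the mode as the ioctl argument itself,
 * not a pointer.
 *
 *	ioctl(fd, MTDFILEMODE, MTD_MODE_RAW);
 *	pread(fd, buf, info.writesize, 0);
 *	ioctl(fd, MTDFILEMODE, MTD_MODE_NORMAL);
 */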
836
837 static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
838 {
839 int ret;
840
841 lock_kernel();
842 ret = mtd_ioctl(file, cmd, arg);
843 unlock_kernel();
844
845 return ret;
846 }
847
848 #ifdef CONFIG_COMPAT
849
850 struct mtd_oob_buf32 {
851 u_int32_t start;
852 u_int32_t length;
853 compat_caddr_t ptr; /* unsigned char* */
854 };
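/*
 * 32-bit layout of struct mtd_oob_buf: same fields, but the buffer pointer is
 * a compat_caddr_t, so requests from 32-bit userspace on a 64-bit kernel can
 * be translated in mtd_compat_ioctl() below.
 */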
855
856 #define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
857 #define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
858
859 static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
860 unsigned long arg)
861 {
862 struct mtd_file_info *mfi = file->private_data;
863 struct mtd_info *mtd = mfi->mtd;
864 void __user *argp = compat_ptr(arg);
865 int ret = 0;
866
867 lock_kernel();
868
869 switch (cmd) {
870 case MEMWRITEOOB32:
871 {
872 struct mtd_oob_buf32 buf;
873 struct mtd_oob_buf32 __user *buf_user = argp;
874
875 if (copy_from_user(&buf, argp, sizeof(buf)))
876 ret = -EFAULT;
877 else
878 ret = mtd_do_writeoob(file, mtd, buf.start,
879 buf.length, compat_ptr(buf.ptr),
880 &buf_user->length);
881 break;
882 }
883
884 case MEMREADOOB32:
885 {
886 struct mtd_oob_buf32 buf;
887 struct mtd_oob_buf32 __user *buf_user = argp;
888
889 /* NOTE: writes return length to buf_user->start */
890 if (copy_from_user(&buf, argp, sizeof(buf)))
891 ret = -EFAULT;
892 else
893 ret = mtd_do_readoob(mtd, buf.start,
894 buf.length, compat_ptr(buf.ptr),
895 &buf_user->start);
896 break;
897 }
898 default:
899 ret = mtd_ioctl(file, cmd, (unsigned long)argp);
900 }
901
902 unlock_kernel();
903
904 return ret;
905 }
906
907 #endif /* CONFIG_COMPAT */
908
909 /*
910 * try to determine where a shared mapping can be made
911 * - only supported for NOMMU at the moment (MMU can't copy private
912 * mappings)
913 */
914 #ifndef CONFIG_MMU
915 static unsigned long mtd_get_unmapped_area(struct file *file,
916 unsigned long addr,
917 unsigned long len,
918 unsigned long pgoff,
919 unsigned long flags)
920 {
921 struct mtd_file_info *mfi = file->private_data;
922 struct mtd_info *mtd = mfi->mtd;
923
924 if (mtd->get_unmapped_area) {
925 unsigned long offset;
926
927 if (addr != 0)
928 return (unsigned long) -EINVAL;
929
930 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
931 return (unsigned long) -EINVAL;
932
933 offset = pgoff << PAGE_SHIFT;
934 if (offset > mtd->size - len)
935 return (unsigned long) -EINVAL;
936
937 return mtd->get_unmapped_area(mtd, len, offset, flags);
938 }
939
940 /* can't map directly */
941 return (unsigned long) -ENOSYS;
942 }
943 #endif
944
945 /*
946 * set up a mapping for shared memory segments
947 */
948 static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
949 {
950 #ifdef CONFIG_MMU
951 struct mtd_file_info *mfi = file->private_data;
952 struct mtd_info *mtd = mfi->mtd;
953
954 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
955 return 0;
956 return -ENOSYS;
957 #else
958 return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
959 #endif
960 }
961
962 static const struct file_operations mtd_fops = {
963 .owner = THIS_MODULE,
964 .llseek = mtd_lseek,
965 .read = mtd_read,
966 .write = mtd_write,
967 .unlocked_ioctl = mtd_unlocked_ioctl,
968 #ifdef CONFIG_COMPAT
969 .compat_ioctl = mtd_compat_ioctl,
970 #endif
971 .open = mtd_open,
972 .release = mtd_close,
973 .mmap = mtd_mmap,
974 #ifndef CONFIG_MMU
975 .get_unmapped_area = mtd_get_unmapped_area,
976 #endif
977 };
978
979 static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
980 const char *dev_name, void *data,
981 struct vfsmount *mnt)
982 {
983 return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
984 mnt);
985 }
986
987 static struct file_system_type mtd_inodefs_type = {
988 .name = "mtd_inodefs",
989 .get_sb = mtd_inodefs_get_sb,
990 .kill_sb = kill_anon_super,
991 };
992
993 static void mtdchar_notify_add(struct mtd_info *mtd)
994 {
995 }
996
997 static void mtdchar_notify_remove(struct mtd_info *mtd)
998 {
999 struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
1000
1001 if (mtd_ino) {
1002 /* Destroy the inode if it exists */
1003 mtd_ino->i_nlink = 0;
1004 iput(mtd_ino);
1005 }
1006 }
1007
1008 static struct mtd_notifier mtdchar_notifier = {
1009 .add = mtdchar_notify_add,
1010 .remove = mtdchar_notify_remove,
1011 };
1012
1013 static int __init init_mtdchar(void)
1014 {
1015 int ret;
1016
1017 ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1018 "mtd", &mtd_fops);
1019 if (ret < 0) {
1020 pr_notice("Can't allocate major number %d for "
1021 "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
1022 return ret;
1023 }
1024
1025 ret = register_filesystem(&mtd_inodefs_type);
1026 if (ret) {
1027 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1028 goto err_unregister_chdev;
1029 }
1030
1031 mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
1032 if (IS_ERR(mtd_inode_mnt)) {
1033 ret = PTR_ERR(mtd_inode_mnt);
1034 pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
1035 goto err_unregister_filesystem;
1036 }
1037 register_mtd_user(&mtdchar_notifier);
1038
1039 return ret;
1040
1041 err_unregister_filesystem:
1042 unregister_filesystem(&mtd_inodefs_type);
1043 err_unregister_chdev:
1044 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1045 return ret;
1046 }
1047
1048 static void __exit cleanup_mtdchar(void)
1049 {
1050 unregister_mtd_user(&mtdchar_notifier);
1051 mntput(mtd_inode_mnt);
1052 unregister_filesystem(&mtd_inodefs_type);
1053 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1054 }
1055
1056 module_init(init_mtdchar);
1057 module_exit(cleanup_mtdchar);
1058
1059 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1060
1061 MODULE_LICENSE("GPL");
1062 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1063 MODULE_DESCRIPTION("Direct character-device access to MTD devices");