1 /*
2 * fs/libfs.c
3 * Library for filesystems writers.
4 */
5
6 #include <linux/export.h>
7 #include <linux/pagemap.h>
8 #include <linux/slab.h>
9 #include <linux/mount.h>
10 #include <linux/vfs.h>
11 #include <linux/quotaops.h>
12 #include <linux/mutex.h>
13 #include <linux/namei.h>
14 #include <linux/exportfs.h>
15 #include <linux/writeback.h>
16 #include <linux/buffer_head.h> /* sync_mapping_buffers */
17
18 #include <asm/uaccess.h>
19
20 #include "internal.h"
21
22 static inline int simple_positive(struct dentry *dentry)
23 {
24 return dentry->d_inode && !d_unhashed(dentry);
25 }
26
27 int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
28 struct kstat *stat)
29 {
30 struct inode *inode = dentry->d_inode;
31 generic_fillattr(inode, stat);
32 stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
33 return 0;
34 }
35 EXPORT_SYMBOL(simple_getattr);
36
37 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
38 {
39 buf->f_type = dentry->d_sb->s_magic;
40 buf->f_bsize = PAGE_CACHE_SIZE;
41 buf->f_namelen = NAME_MAX;
42 return 0;
43 }
44 EXPORT_SYMBOL(simple_statfs);
45
46 /*
47 * Retaining negative dentries for an in-memory filesystem just wastes
48 * memory and lookup time: arrange for them to be deleted immediately.
49 */
50 static int simple_delete_dentry(const struct dentry *dentry)
51 {
52 return 1;
53 }
54
55 /*
56 * Lookup the data. This is trivial - if the dentry didn't already
57 * exist, we know it is negative. Set d_op to delete negative dentries.
58 */
59 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
60 {
61 static const struct dentry_operations simple_dentry_operations = {
62 .d_delete = simple_delete_dentry,
63 };
64
65 if (dentry->d_name.len > NAME_MAX)
66 return ERR_PTR(-ENAMETOOLONG);
67 if (!dentry->d_sb->s_d_op)
68 d_set_d_op(dentry, &simple_dentry_operations);
69 d_add(dentry, NULL);
70 return NULL;
71 }
72 EXPORT_SYMBOL(simple_lookup);
73
74 int dcache_dir_open(struct inode *inode, struct file *file)
75 {
76 static struct qstr cursor_name = QSTR_INIT(".", 1);
77
78 file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
79
80 return file->private_data ? 0 : -ENOMEM;
81 }
82 EXPORT_SYMBOL(dcache_dir_open);
83
84 int dcache_dir_close(struct inode *inode, struct file *file)
85 {
86 dput(file->private_data);
87 return 0;
88 }
89 EXPORT_SYMBOL(dcache_dir_close);
90
91 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
92 {
93 struct dentry *dentry = file->f_path.dentry;
94 mutex_lock(&dentry->d_inode->i_mutex);
95 switch (whence) {
96 case 1:
97 offset += file->f_pos; /* fall through to the SEEK_SET check */
98 case 0:
99 if (offset >= 0)
100 break;
101 default:
102 mutex_unlock(&dentry->d_inode->i_mutex);
103 return -EINVAL;
104 }
105 if (offset != file->f_pos) {
106 file->f_pos = offset;
107 if (file->f_pos >= 2) {
108 struct list_head *p;
109 struct dentry *cursor = file->private_data;
110 loff_t n = file->f_pos - 2;
111
112 spin_lock(&dentry->d_lock);
113 /* d_lock not required for cursor */
114 list_del(&cursor->d_u.d_child);
115 p = dentry->d_subdirs.next;
116 while (n && p != &dentry->d_subdirs) {
117 struct dentry *next;
118 next = list_entry(p, struct dentry, d_u.d_child);
119 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
120 if (simple_positive(next))
121 n--;
122 spin_unlock(&next->d_lock);
123 p = p->next;
124 }
125 list_add_tail(&cursor->d_u.d_child, p);
126 spin_unlock(&dentry->d_lock);
127 }
128 }
129 mutex_unlock(&dentry->d_inode->i_mutex);
130 return offset;
131 }
132 EXPORT_SYMBOL(dcache_dir_lseek);
133
134 /* Relationship between i_mode and the DT_xxx types */
135 static inline unsigned char dt_type(struct inode *inode)
136 {
137 return (inode->i_mode >> 12) & 15;
138 }
139
140 /*
141 * Directory is locked and all positive dentries in it are safe, since
142 * for ramfs-type trees they can't go away without unlink() or rmdir(),
143 * both impossible due to the lock on the directory.
144 */
145
146 int dcache_readdir(struct file *file, struct dir_context *ctx)
147 {
148 struct dentry *dentry = file->f_path.dentry;
149 struct dentry *cursor = file->private_data;
150 struct list_head *p, *q = &cursor->d_u.d_child;
151
152 if (!dir_emit_dots(file, ctx))
153 return 0;
154 spin_lock(&dentry->d_lock);
155 if (ctx->pos == 2)
156 list_move(q, &dentry->d_subdirs);
157
158 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
159 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
160 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
161 if (!simple_positive(next)) {
162 spin_unlock(&next->d_lock);
163 continue;
164 }
165
166 spin_unlock(&next->d_lock);
167 spin_unlock(&dentry->d_lock);
168 if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
169 next->d_inode->i_ino, dt_type(next->d_inode)))
170 return 0;
171 spin_lock(&dentry->d_lock);
172 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
173 /* next is still alive */
174 list_move(q, p);
175 spin_unlock(&next->d_lock);
176 p = q;
177 ctx->pos++;
178 }
179 spin_unlock(&dentry->d_lock);
180 return 0;
181 }
182 EXPORT_SYMBOL(dcache_readdir);
183
184 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
185 {
186 return -EISDIR;
187 }
188 EXPORT_SYMBOL(generic_read_dir);
189
190 const struct file_operations simple_dir_operations = {
191 .open = dcache_dir_open,
192 .release = dcache_dir_close,
193 .llseek = dcache_dir_lseek,
194 .read = generic_read_dir,
195 .iterate = dcache_readdir,
196 .fsync = noop_fsync,
197 };
198 EXPORT_SYMBOL(simple_dir_operations);
199
200 const struct inode_operations simple_dir_inode_operations = {
201 .lookup = simple_lookup,
202 };
203 EXPORT_SYMBOL(simple_dir_inode_operations);
204
205 static const struct super_operations simple_super_operations = {
206 .statfs = simple_statfs,
207 };
208
209 /*
210 * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
211 * will never be mountable)
212 */
213 struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
214 const struct super_operations *ops,
215 const struct dentry_operations *dops, unsigned long magic)
216 {
217 struct super_block *s;
218 struct dentry *dentry;
219 struct inode *root;
220 struct qstr d_name = QSTR_INIT(name, strlen(name));
221
222 s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
223 if (IS_ERR(s))
224 return ERR_CAST(s);
225
226 s->s_maxbytes = MAX_LFS_FILESIZE;
227 s->s_blocksize = PAGE_SIZE;
228 s->s_blocksize_bits = PAGE_SHIFT;
229 s->s_magic = magic;
230 s->s_op = ops ? ops : &simple_super_operations;
231 s->s_time_gran = 1;
232 root = new_inode(s);
233 if (!root)
234 goto Enomem;
235 /*
236 * since this is the first inode, make it number 1. New inodes created
237 * after this must take care not to collide with it (by passing
238 * max_reserved of 1 to iunique).
239 */
240 root->i_ino = 1;
241 root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
242 root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
243 dentry = __d_alloc(s, &d_name);
244 if (!dentry) {
245 iput(root);
246 goto Enomem;
247 }
248 d_instantiate(dentry, root);
249 s->s_root = dentry;
250 s->s_d_op = dops;
251 s->s_flags |= MS_ACTIVE;
252 return dget(s->s_root);
253
254 Enomem:
255 deactivate_locked_super(s);
256 return ERR_PTR(-ENOMEM);
257 }
258 EXPORT_SYMBOL(mount_pseudo);
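
/*
 * Illustrative sketch (editor's addition, not part of libfs): a minimal
 * .mount method for a pseudo-filesystem in the sockfs/pipefs style.  All
 * "pseudofs_*" names and the magic number are hypothetical; passing NULL
 * for the super_operations falls back to simple_super_operations above.
 */
#define PSEUDOFS_EXAMPLE_MAGIC	0x70736673	/* hypothetical */

static struct dentry *pseudofs_mount(struct file_system_type *fs_type,
				     int flags, const char *dev_name,
				     void *data)
{
	return mount_pseudo(fs_type, "pseudofs:", NULL, NULL,
			    PSEUDOFS_EXAMPLE_MAGIC);
}

static struct file_system_type pseudofs_fs_type = {
	.name		= "pseudofs",
	.mount		= pseudofs_mount,
	.kill_sb	= kill_anon_super,
};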
259
260 int simple_open(struct inode *inode, struct file *file)
261 {
262 if (inode->i_private)
263 file->private_data = inode->i_private;
264 return 0;
265 }
266 EXPORT_SYMBOL(simple_open);
267
268 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
269 {
270 struct inode *inode = old_dentry->d_inode;
271
272 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
273 inc_nlink(inode);
274 ihold(inode);
275 dget(dentry);
276 d_instantiate(dentry, inode);
277 return 0;
278 }
279 EXPORT_SYMBOL(simple_link);
280
281 int simple_empty(struct dentry *dentry)
282 {
283 struct dentry *child;
284 int ret = 0;
285
286 spin_lock(&dentry->d_lock);
287 list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
288 spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
289 if (simple_positive(child)) {
290 spin_unlock(&child->d_lock);
291 goto out;
292 }
293 spin_unlock(&child->d_lock);
294 }
295 ret = 1;
296 out:
297 spin_unlock(&dentry->d_lock);
298 return ret;
299 }
300 EXPORT_SYMBOL(simple_empty);
301
302 int simple_unlink(struct inode *dir, struct dentry *dentry)
303 {
304 struct inode *inode = dentry->d_inode;
305
306 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
307 drop_nlink(inode);
308 dput(dentry);
309 return 0;
310 }
311 EXPORT_SYMBOL(simple_unlink);
312
313 int simple_rmdir(struct inode *dir, struct dentry *dentry)
314 {
315 if (!simple_empty(dentry))
316 return -ENOTEMPTY;
317
318 drop_nlink(dentry->d_inode);
319 simple_unlink(dir, dentry);
320 drop_nlink(dir);
321 return 0;
322 }
323 EXPORT_SYMBOL(simple_rmdir);
324
325 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
326 struct inode *new_dir, struct dentry *new_dentry)
327 {
328 struct inode *inode = old_dentry->d_inode;
329 int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
330
331 if (!simple_empty(new_dentry))
332 return -ENOTEMPTY;
333
334 if (new_dentry->d_inode) {
335 simple_unlink(new_dir, new_dentry);
336 if (they_are_dirs) {
337 drop_nlink(new_dentry->d_inode);
338 drop_nlink(old_dir);
339 }
340 } else if (they_are_dirs) {
341 drop_nlink(old_dir);
342 inc_nlink(new_dir);
343 }
344
345 old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
346 new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;
347
348 return 0;
349 }
350 EXPORT_SYMBOL(simple_rename);
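
/*
 * Illustrative sketch (editor's addition, not part of libfs): a ramfs-like
 * in-memory filesystem can build most of its directory inode operations
 * from the simple_* helpers above; only the operations that create new
 * inodes (.create, .mkdir, .mknod, ...) need filesystem-specific code.
 */
static const struct inode_operations example_dir_inode_operations = {
	.lookup		= simple_lookup,	/* negative dentries are dropped */
	.link		= simple_link,
	.unlink		= simple_unlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	/* .create, .mkdir, .mknod: filesystem-specific, omitted here */
};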
351
352 /**
353 * simple_setattr - setattr for simple filesystem
354 * @dentry: dentry
355 * @iattr: iattr structure
356 *
357 * Returns 0 on success, -error on failure.
358 *
359 * simple_setattr is a simple ->setattr implementation without a proper
360 * implementation of size changes.
361 *
362 * It can either be used for in-memory filesystems or special files
363 * on simple regular filesystems. Anything that needs to change on-disk
364 * or wire state on size changes needs its own setattr method.
365 */
366 int simple_setattr(struct dentry *dentry, struct iattr *iattr)
367 {
368 struct inode *inode = dentry->d_inode;
369 int error;
370
371 error = inode_change_ok(inode, iattr);
372 if (error)
373 return error;
374
375 if (iattr->ia_valid & ATTR_SIZE)
376 truncate_setsize(inode, iattr->ia_size);
377 setattr_copy(inode, iattr);
378 mark_inode_dirty(inode);
379 return 0;
380 }
381 EXPORT_SYMBOL(simple_setattr);
382
383 int simple_readpage(struct file *file, struct page *page)
384 {
385 clear_highpage(page);
386 flush_dcache_page(page);
387 SetPageUptodate(page);
388 unlock_page(page);
389 return 0;
390 }
391 EXPORT_SYMBOL(simple_readpage);
392
393 int simple_write_begin(struct file *file, struct address_space *mapping,
394 loff_t pos, unsigned len, unsigned flags,
395 struct page **pagep, void **fsdata)
396 {
397 struct page *page;
398 pgoff_t index;
399
400 index = pos >> PAGE_CACHE_SHIFT;
401
402 page = grab_cache_page_write_begin(mapping, index, flags);
403 if (!page)
404 return -ENOMEM;
405
406 *pagep = page;
407
408 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
409 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
410
411 zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
412 }
413 return 0;
414 }
415 EXPORT_SYMBOL(simple_write_begin);
416
417 /**
418 * simple_write_end - .write_end helper for non-block-device FSes
419 * @file:	See the .write_end method in
420 *		struct address_space_operations
421 * @mapping:		"
422 * @pos:		"
423 * @len:		"
424 * @copied:		"
425 * @page:		"
426 * @fsdata:		"
427 *
428 * simple_write_end does the minimum needed for updating a page after writing is
429 * done. It has the same API signature as the .write_end of
430 * address_space_operations vector. So it can just be set onto .write_end for
431 * FSes that don't need any other processing. i_mutex is assumed to be held.
432 * Block based filesystems should use generic_write_end().
433 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
434 * is not called, so a filesystem that actually does store data in .write_inode
435 * should extend on what's done here with a call to mark_inode_dirty() in the
436 * case that i_size has changed.
437 */
438 int simple_write_end(struct file *file, struct address_space *mapping,
439 loff_t pos, unsigned len, unsigned copied,
440 struct page *page, void *fsdata)
441 {
442 struct inode *inode = page->mapping->host;
443 loff_t last_pos = pos + copied;
444
445 /* zero the stale part of the page if we did a short copy */
446 if (copied < len) {
447 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
448
449 zero_user(page, from + copied, len - copied);
450 }
451
452 if (!PageUptodate(page))
453 SetPageUptodate(page);
454 /*
455 * No need to use i_size_read() here, the i_size
456 * cannot change under us because we hold the i_mutex.
457 */
458 if (last_pos > inode->i_size)
459 i_size_write(inode, last_pos);
460
461 set_page_dirty(page);
462 unlock_page(page);
463 page_cache_release(page);
464
465 return copied;
466 }
467 EXPORT_SYMBOL(simple_write_end);
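
/*
 * Illustrative sketch (editor's addition, not part of libfs): an in-memory
 * filesystem that keeps all of its data in the page cache can point its
 * address_space_operations directly at the helpers above (ramfs does
 * essentially this).  Block-based filesystems should use generic_write_end()
 * and friends instead, as noted in the comment above.
 */
static const struct address_space_operations example_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
};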
468
469 /*
470 * the inodes created here are not hashed. If you use iunique to generate
471 * unique inode values later for this filesystem, then you must take care
472 * to pass it an appropriate max_reserved value to avoid collisions.
473 */
474 int simple_fill_super(struct super_block *s, unsigned long magic,
475 struct tree_descr *files)
476 {
477 struct inode *inode;
478 struct dentry *root;
479 struct dentry *dentry;
480 int i;
481
482 s->s_blocksize = PAGE_CACHE_SIZE;
483 s->s_blocksize_bits = PAGE_CACHE_SHIFT;
484 s->s_magic = magic;
485 s->s_op = &simple_super_operations;
486 s->s_time_gran = 1;
487
488 inode = new_inode(s);
489 if (!inode)
490 return -ENOMEM;
491 /*
492 * because the root inode is 1, the files array must not contain an
493 * entry at index 1
494 */
495 inode->i_ino = 1;
496 inode->i_mode = S_IFDIR | 0755;
497 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
498 inode->i_op = &simple_dir_inode_operations;
499 inode->i_fop = &simple_dir_operations;
500 set_nlink(inode, 2);
501 root = d_make_root(inode);
502 if (!root)
503 return -ENOMEM;
504 for (i = 0; !files->name || files->name[0]; i++, files++) {
505 if (!files->name)
506 continue;
507
508 /* warn if it tries to conflict with the root inode */
509 if (unlikely(i == 1))
510 printk(KERN_WARNING "%s: %s passed in a files array"
511 "with an index of 1!\n", __func__,
512 s->s_type->name);
513
514 dentry = d_alloc_name(root, files->name);
515 if (!dentry)
516 goto out;
517 inode = new_inode(s);
518 if (!inode) {
519 dput(dentry);
520 goto out;
521 }
522 inode->i_mode = S_IFREG | files->mode;
523 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
524 inode->i_fop = files->ops;
525 inode->i_ino = i;
526 d_add(dentry, inode);
527 }
528 s->s_root = root;
529 return 0;
530 out:
531 d_genocide(root);
532 shrink_dcache_parent(root);
533 dput(root);
534 return -ENOMEM;
535 }
536 EXPORT_SYMBOL(simple_fill_super);
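
/*
 * Illustrative sketch (editor's addition, not part of libfs): a fill_super
 * callback built on simple_fill_super().  Slots 0 and 1 of the array are
 * left empty because the root inode is number 1, and the array ends with an
 * entry whose name is "".  The magic number and the two file_operations
 * referenced here are hypothetical.
 */
#define EXAMPLEFS_MAGIC	0x6578616d		/* hypothetical */

static const struct file_operations examplefs_status_fops;	/* hypothetical */
static const struct file_operations examplefs_control_fops;	/* hypothetical */

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct tree_descr examplefs_files[] = {
		[2] = { "status",  &examplefs_status_fops,  S_IRUGO },
		      { "control", &examplefs_control_fops, S_IWUSR },
		{ "" }	/* terminator */
	};

	return simple_fill_super(sb, EXAMPLEFS_MAGIC, examplefs_files);
}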
537
538 static DEFINE_SPINLOCK(pin_fs_lock);
539
540 int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
541 {
542 struct vfsmount *mnt = NULL;
543 spin_lock(&pin_fs_lock);
544 if (unlikely(!*mount)) {
545 spin_unlock(&pin_fs_lock);
546 mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
547 if (IS_ERR(mnt))
548 return PTR_ERR(mnt);
549 spin_lock(&pin_fs_lock);
550 if (!*mount)
551 *mount = mnt;
552 }
553 mntget(*mount);
554 ++*count;
555 spin_unlock(&pin_fs_lock);
556 mntput(mnt);
557 return 0;
558 }
559 EXPORT_SYMBOL(simple_pin_fs);
560
561 void simple_release_fs(struct vfsmount **mount, int *count)
562 {
563 struct vfsmount *mnt;
564 spin_lock(&pin_fs_lock);
565 mnt = *mount;
566 if (!--*count)
567 *mount = NULL;
568 spin_unlock(&pin_fs_lock);
569 mntput(mnt);
570 }
571 EXPORT_SYMBOL(simple_release_fs);
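
/*
 * Illustrative sketch (editor's addition, not part of libfs): the pin/release
 * pattern used by e.g. debugfs and securityfs - mount the filesystem lazily
 * the first time something is created in it, and drop the pin when the last
 * object goes away.  The file_system_type here is hypothetical.
 */
static struct file_system_type examplefs_fs_type;	/* hypothetical, registered elsewhere */
static struct vfsmount *examplefs_mnt;
static int examplefs_mnt_count;

static int examplefs_pin(void)
{
	return simple_pin_fs(&examplefs_fs_type, &examplefs_mnt,
			     &examplefs_mnt_count);
}

static void examplefs_unpin(void)
{
	simple_release_fs(&examplefs_mnt, &examplefs_mnt_count);
}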
572
573 /**
574 * simple_read_from_buffer - copy data from the buffer to user space
575 * @to: the user space buffer to read to
576 * @count: the maximum number of bytes to read
577 * @ppos: the current position in the buffer
578 * @from: the buffer to read from
579 * @available: the size of the buffer
580 *
581 * The simple_read_from_buffer() function reads up to @count bytes from the
582 * buffer @from at offset @ppos into the user space address starting at @to.
583 *
584 * On success, the number of bytes read is returned and the offset @ppos is
585 * advanced by this number, or a negative value is returned on error.
586 **/
587 ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
588 const void *from, size_t available)
589 {
590 loff_t pos = *ppos;
591 size_t ret;
592
593 if (pos < 0)
594 return -EINVAL;
595 if (pos >= available || !count)
596 return 0;
597 if (count > available - pos)
598 count = available - pos;
599 ret = copy_to_user(to, from + pos, count);
600 if (ret == count)
601 return -EFAULT;
602 count -= ret;
603 *ppos = pos + count;
604 return count;
605 }
606 EXPORT_SYMBOL(simple_read_from_buffer);
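
/*
 * Illustrative sketch (editor's addition, not part of libfs): a typical
 * debugfs-style ->read method.  The value is formatted into a small on-stack
 * buffer and simple_read_from_buffer() takes care of the offset and
 * short-copy bookkeeping.  "example_counter" is hypothetical.
 */
static unsigned int example_counter;

static ssize_t example_counter_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
{
	char tmp[32];
	int len = scnprintf(tmp, sizeof(tmp), "%u\n", example_counter);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}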
607
608 /**
609 * simple_write_to_buffer - copy data from user space to the buffer
610 * @to: the buffer to write to
611 * @available: the size of the buffer
612 * @ppos: the current position in the buffer
613 * @from: the user space buffer to read from
614 * @count: the maximum number of bytes to read
615 *
616 * The simple_write_to_buffer() function reads up to @count bytes from the user
617 * space address starting at @from into the buffer @to at offset @ppos.
618 *
619 * On success, the number of bytes written is returned and the offset @ppos is
620 * advanced by this number, or a negative value is returned on error.
621 **/
622 ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
623 const void __user *from, size_t count)
624 {
625 loff_t pos = *ppos;
626 size_t res;
627
628 if (pos < 0)
629 return -EINVAL;
630 if (pos >= available || !count)
631 return 0;
632 if (count > available - pos)
633 count = available - pos;
634 res = copy_from_user(to + pos, from, count);
635 if (res == count)
636 return -EFAULT;
637 count -= res;
638 *ppos = pos + count;
639 return count;
640 }
641 EXPORT_SYMBOL(simple_write_to_buffer);
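
/*
 * Illustrative sketch (editor's addition, not part of libfs): the matching
 * ->write method, accepting user data into a fixed-size kernel buffer with
 * simple_write_to_buffer() handling the offset and bounds checks.  The
 * command buffer is hypothetical.
 */
static char example_cmd_buf[64];

static ssize_t example_cmd_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_write_to_buffer(example_cmd_buf,
				      sizeof(example_cmd_buf) - 1,
				      ppos, buf, count);
}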
642
643 /**
644 * memory_read_from_buffer - copy data from the buffer
645 * @to: the kernel space buffer to read to
646 * @count: the maximum number of bytes to read
647 * @ppos: the current position in the buffer
648 * @from: the buffer to read from
649 * @available: the size of the buffer
650 *
651 * The memory_read_from_buffer() function reads up to @count bytes from the
652 * buffer @from at offset @ppos into the kernel space address starting at @to.
653 *
654 * On success, the number of bytes read is returned and the offset @ppos is
655 * advanced by this number, or a negative value is returned on error.
656 **/
657 ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
658 const void *from, size_t available)
659 {
660 loff_t pos = *ppos;
661
662 if (pos < 0)
663 return -EINVAL;
664 if (pos >= available)
665 return 0;
666 if (count > available - pos)
667 count = available - pos;
668 memcpy(to, from + pos, count);
669 *ppos = pos + count;
670
671 return count;
672 }
673 EXPORT_SYMBOL(memory_read_from_buffer);
674
675 /*
676 * Transaction based IO.
677 * The file expects a single write which triggers the transaction, and then
678 * possibly a read which collects the result - which is stored in a
679 * file-local buffer.
680 */
681
682 void simple_transaction_set(struct file *file, size_t n)
683 {
684 struct simple_transaction_argresp *ar = file->private_data;
685
686 BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
687
688 /*
689 * The barrier ensures that ar->size will really remain zero until
690 * ar->data is ready for reading.
691 */
692 smp_mb();
693 ar->size = n;
694 }
695 EXPORT_SYMBOL(simple_transaction_set);
696
697 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
698 {
699 struct simple_transaction_argresp *ar;
700 static DEFINE_SPINLOCK(simple_transaction_lock);
701
702 if (size > SIMPLE_TRANSACTION_LIMIT - 1)
703 return ERR_PTR(-EFBIG);
704
705 ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
706 if (!ar)
707 return ERR_PTR(-ENOMEM);
708
709 spin_lock(&simple_transaction_lock);
710
711 /* only one write allowed per open */
712 if (file->private_data) {
713 spin_unlock(&simple_transaction_lock);
714 free_page((unsigned long)ar);
715 return ERR_PTR(-EBUSY);
716 }
717
718 file->private_data = ar;
719
720 spin_unlock(&simple_transaction_lock);
721
722 if (copy_from_user(ar->data, buf, size))
723 return ERR_PTR(-EFAULT);
724
725 return ar->data;
726 }
727 EXPORT_SYMBOL(simple_transaction_get);
728
729 ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
730 {
731 struct simple_transaction_argresp *ar = file->private_data;
732
733 if (!ar)
734 return 0;
735 return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
736 }
737 EXPORT_SYMBOL(simple_transaction_read);
738
739 int simple_transaction_release(struct inode *inode, struct file *file)
740 {
741 free_page((unsigned long)file->private_data);
742 return 0;
743 }
744 EXPORT_SYMBOL(simple_transaction_release);
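
/*
 * Illustrative sketch (editor's addition, not part of libfs): the
 * write-then-read transaction pattern described above, as used by nfsd's
 * control files.  The request is parsed, the reply is written back into the
 * same page, and its length is published with simple_transaction_set().
 * example_handle_request() is hypothetical.
 */
static ssize_t example_handle_request(char *buf, size_t size);

static ssize_t example_transaction_write(struct file *file,
					 const char __user *buf,
					 size_t size, loff_t *pos)
{
	ssize_t rv;
	char *data = simple_transaction_get(file, buf, size);

	if (IS_ERR(data))
		return PTR_ERR(data);

	/* consume the request and write the reply over the same buffer */
	rv = example_handle_request(data, size);
	if (rv >= 0) {
		simple_transaction_set(file, rv);
		rv = size;
	}
	return rv;
}

static const struct file_operations example_transaction_fops = {
	.write		= example_transaction_write,
	.read		= simple_transaction_read,
	.release	= simple_transaction_release,
	.llseek		= default_llseek,
};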
745
746 /* Simple attribute files */
747
748 struct simple_attr {
749 int (*get)(void *, u64 *);
750 int (*set)(void *, u64);
751 char get_buf[24]; /* enough to store a u64 and "\n\0" */
752 char set_buf[24];
753 void *data;
754 const char *fmt; /* format for read operation */
755 struct mutex mutex; /* protects access to these buffers */
756 };
757
758 /* simple_attr_open is called by an actual attribute open file operation
759 * to set the attribute specific access operations. */
760 int simple_attr_open(struct inode *inode, struct file *file,
761 int (*get)(void *, u64 *), int (*set)(void *, u64),
762 const char *fmt)
763 {
764 struct simple_attr *attr;
765
766 attr = kmalloc(sizeof(*attr), GFP_KERNEL);
767 if (!attr)
768 return -ENOMEM;
769
770 attr->get = get;
771 attr->set = set;
772 attr->data = inode->i_private;
773 attr->fmt = fmt;
774 mutex_init(&attr->mutex);
775
776 file->private_data = attr;
777
778 return nonseekable_open(inode, file);
779 }
780 EXPORT_SYMBOL_GPL(simple_attr_open);
781
782 int simple_attr_release(struct inode *inode, struct file *file)
783 {
784 kfree(file->private_data);
785 return 0;
786 }
787 EXPORT_SYMBOL_GPL(simple_attr_release); /* GPL-only? This? Really? */
788
789 /* read from the buffer that is filled with the get function */
790 ssize_t simple_attr_read(struct file *file, char __user *buf,
791 size_t len, loff_t *ppos)
792 {
793 struct simple_attr *attr;
794 size_t size;
795 ssize_t ret;
796
797 attr = file->private_data;
798
799 if (!attr->get)
800 return -EACCES;
801
802 ret = mutex_lock_interruptible(&attr->mutex);
803 if (ret)
804 return ret;
805
806 if (*ppos) { /* continued read */
807 size = strlen(attr->get_buf);
808 } else { /* first read */
809 u64 val;
810 ret = attr->get(attr->data, &val);
811 if (ret)
812 goto out;
813
814 size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
815 attr->fmt, (unsigned long long)val);
816 }
817
818 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
819 out:
820 mutex_unlock(&attr->mutex);
821 return ret;
822 }
823 EXPORT_SYMBOL_GPL(simple_attr_read);
824
825 /* interpret the buffer as a number to call the set function with */
826 ssize_t simple_attr_write(struct file *file, const char __user *buf,
827 size_t len, loff_t *ppos)
828 {
829 struct simple_attr *attr;
830 u64 val;
831 size_t size;
832 ssize_t ret;
833
834 attr = file->private_data;
835 if (!attr->set)
836 return -EACCES;
837
838 ret = mutex_lock_interruptible(&attr->mutex);
839 if (ret)
840 return ret;
841
842 ret = -EFAULT;
843 size = min(sizeof(attr->set_buf) - 1, len);
844 if (copy_from_user(attr->set_buf, buf, size))
845 goto out;
846
847 attr->set_buf[size] = '\0';
848 val = simple_strtoll(attr->set_buf, NULL, 0);
849 ret = attr->set(attr->data, val);
850 if (ret == 0)
851 ret = len; /* on success, claim we got the whole input */
852 out:
853 mutex_unlock(&attr->mutex);
854 return ret;
855 }
856 EXPORT_SYMBOL_GPL(simple_attr_write);
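
/*
 * Illustrative sketch (editor's addition, not part of libfs): the
 * simple_attr_* helpers are normally used through DEFINE_SIMPLE_ATTRIBUTE()
 * from <linux/fs.h>, which generates the open/read/write/release glue.
 * A hypothetical u32 tunable is exposed here; the inode's i_private is
 * expected to point at the value when the file is created.
 */
static u32 example_timeout;

static int example_timeout_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

static int example_timeout_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_timeout_fops, example_timeout_get,
			example_timeout_set, "%llu\n");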
857
858 /**
859 * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
860 * @sb: filesystem to do the file handle conversion on
861 * @fid: file handle to convert
862 * @fh_len: length of the file handle in bytes
863 * @fh_type: type of file handle
864 * @get_inode: filesystem callback to retrieve inode
865 *
866 * This function decodes @fid as long as it has one of the well-known
867 * Linux filehandle types and calls @get_inode on it to retrieve the
868 * inode for the object specified in the file handle.
869 */
870 struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
871 int fh_len, int fh_type, struct inode *(*get_inode)
872 (struct super_block *sb, u64 ino, u32 gen))
873 {
874 struct inode *inode = NULL;
875
876 if (fh_len < 2)
877 return NULL;
878
879 switch (fh_type) {
880 case FILEID_INO32_GEN:
881 case FILEID_INO32_GEN_PARENT:
882 inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
883 break;
884 }
885
886 return d_obtain_alias(inode);
887 }
888 EXPORT_SYMBOL_GPL(generic_fh_to_dentry);
889
890 /**
891 * generic_fh_to_parent - generic helper for the fh_to_parent export operation
892 * @sb: filesystem to do the file handle conversion on
893 * @fid: file handle to convert
894 * @fh_len: length of the file handle in bytes
895 * @fh_type: type of file handle
896 * @get_inode: filesystem callback to retrieve inode
897 *
898 * This function decodes @fid as long as it has one of the well-known
899 * Linux filehandle types and calls @get_inode on it to retrieve the
900 * inode for the _parent_ object specified in the file handle if it
901 * is specified in the file handle, or NULL otherwise.
902 */
903 struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
904 int fh_len, int fh_type, struct inode *(*get_inode)
905 (struct super_block *sb, u64 ino, u32 gen))
906 {
907 struct inode *inode = NULL;
908
909 if (fh_len <= 2)
910 return NULL;
911
912 switch (fh_type) {
913 case FILEID_INO32_GEN_PARENT:
914 inode = get_inode(sb, fid->i32.parent_ino,
915 (fh_len > 3 ? fid->i32.parent_gen : 0));
916 break;
917 }
918
919 return d_obtain_alias(inode);
920 }
921 EXPORT_SYMBOL_GPL(generic_fh_to_parent);
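
/*
 * Illustrative sketch (editor's addition, not part of libfs): wiring the
 * generic file-handle decoders into an export_operations table, as ext2 and
 * friends do.  example_nfs_get_inode() is a hypothetical helper that looks
 * the inode up by number and checks the generation count.
 */
static struct inode *example_nfs_get_inode(struct super_block *sb,
					   u64 ino, u32 generation);

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid,
					   int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    example_nfs_get_inode);
}

static struct dentry *example_fh_to_parent(struct super_block *sb,
					   struct fid *fid,
					   int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    example_nfs_get_inode);
}

static const struct export_operations example_export_ops = {
	.fh_to_dentry	= example_fh_to_dentry,
	.fh_to_parent	= example_fh_to_parent,
};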
922
923 /**
924 * generic_file_fsync - generic fsync implementation for simple filesystems
925 * @file: file to synchronize
926 * @datasync: only synchronize essential metadata if true
927 *
928 * This is a generic implementation of the fsync method for simple
929 * filesystems which track all non-inode metadata in the buffers list
930 * hanging off the address_space structure.
931 */
932 int generic_file_fsync(struct file *file, loff_t start, loff_t end,
933 int datasync)
934 {
935 struct inode *inode = file->f_mapping->host;
936 int err;
937 int ret;
938
939 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
940 if (err)
941 return err;
942
943 mutex_lock(&inode->i_mutex);
944 ret = sync_mapping_buffers(inode->i_mapping);
945 if (!(inode->i_state & I_DIRTY))
946 goto out;
947 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
948 goto out;
949
950 err = sync_inode_metadata(inode, 1);
951 if (ret == 0)
952 ret = err;
953 out:
954 mutex_unlock(&inode->i_mutex);
955 return ret;
956 }
957 EXPORT_SYMBOL(generic_file_fsync);
958
959 /**
960 * generic_check_addressable - Check addressability of file system
961 * @blocksize_bits: log of file system block size
962 * @num_blocks: number of blocks in file system
963 *
964 * Determine whether a file system with @num_blocks blocks (and a
965 * block size of 2**@blocksize_bits) is addressable by the sector_t
966 * and page cache of the system. Return 0 if so and -EFBIG otherwise.
967 */
968 int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
969 {
970 u64 last_fs_block = num_blocks - 1;
971 u64 last_fs_page =
972 last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
973
974 if (unlikely(num_blocks == 0))
975 return 0;
976
977 if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
978 return -EINVAL;
979
980 if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
981 (last_fs_page > (pgoff_t)(~0ULL))) {
982 return -EFBIG;
983 }
984 return 0;
985 }
986 EXPORT_SYMBOL(generic_check_addressable);
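
/*
 * Worked example (editor's addition): with 1 KiB blocks (blocksize_bits = 10)
 * on a kernel whose sector_t is 32 bits, the largest addressable 512-byte
 * sector index is 2^32 - 1, so the last usable block index is
 * (2^32 - 1) >> (10 - 9) = 2^31 - 1; such a filesystem may therefore hold at
 * most 2^31 blocks, i.e. about 2 TiB.  The pgoff_t test applies the same idea
 * to the page-cache index.
 */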
987
988 /*
989 * No-op implementation of ->fsync for in-memory filesystems.
990 */
991 int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
992 {
993 return 0;
994 }
995 EXPORT_SYMBOL(noop_fsync);
996
997 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
998 void *cookie)
999 {
1000 char *s = nd_get_link(nd);
1001 if (!IS_ERR(s))
1002 kfree(s);
1003 }
1004 EXPORT_SYMBOL(kfree_put_link);
1005
1006 /*
1007 * nop .set_page_dirty method so that people can use .page_mkwrite on
1008 * anon inodes.
1009 */
1010 static int anon_set_page_dirty(struct page *page)
1011 {
1012 return 0;
1013 }
1014
1015 /*
1016 * A single inode exists for all anon_inode files. Contrary to pipes,
1017 * anon_inode inodes have no associated per-instance data, so we need
1018 * only allocate one of them.
1019 */
1020 struct inode *alloc_anon_inode(struct super_block *s)
1021 {
1022 static const struct address_space_operations anon_aops = {
1023 .set_page_dirty = anon_set_page_dirty,
1024 };
1025 struct inode *inode = new_inode_pseudo(s);
1026
1027 if (!inode)
1028 return ERR_PTR(-ENOMEM);
1029
1030 inode->i_ino = get_next_ino();
1031 inode->i_mapping->a_ops = &anon_aops;
1032
1033 /*
1034 * Mark the inode dirty from the very beginning,
1035 * that way it will never be moved to the dirty
1036 * list because mark_inode_dirty() will think
1037 * that it already _is_ on the dirty list.
1038 */
1039 inode->i_state = I_DIRTY;
1040 inode->i_mode = S_IRUSR | S_IWUSR;
1041 inode->i_uid = current_fsuid();
1042 inode->i_gid = current_fsgid();
1043 inode->i_flags |= S_PRIVATE;
1044 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1045 return inode;
1046 }
1047 EXPORT_SYMBOL(alloc_anon_inode);