/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS file operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
#include <linux/fs.h>
#include <linux/pagemap.h>

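/*
 * Ask the client-core to drop any readahead-cache data it holds for
 * this inode.
 */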
static int flush_racache(struct inode *inode)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_kernel_op_s *new_op;
        int ret;

        gossip_debug(GOSSIP_UTILS_DEBUG,
                     "%s: %pU: Handle is %pU | fs_id %d\n", __func__,
                     get_khandle_from_ino(inode), &orangefs_inode->refn.khandle,
                     orangefs_inode->refn.fs_id);

        new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);
        if (!new_op)
                return -ENOMEM;
        new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;

        ret = service_operation(new_op, "orangefs_flush_racache",
                                get_interruptible_flag(inode));

        gossip_debug(GOSSIP_UTILS_DEBUG, "%s: got return value of %d\n",
                     __func__, ret);

        op_release(new_op);
        return ret;
}

/*
 * Copy to client-core's address space from the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iov_iter can either contain addresses (which may be
 * kernel-space or user-space addresses) or pointers to struct pages.
 */
static int precopy_buffers(int buffer_index,
                           struct iov_iter *iter,
                           size_t total_size)
{
        int ret = 0;
        /*
         * copy data from application/kernel by pulling it out
         * of the iovec.
         */
        if (total_size) {
                ret = orangefs_bufmap_copy_from_iovec(iter,
                                                      buffer_index,
                                                      total_size);
                if (ret < 0)
                        gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
                                   __func__,
                                   (long)ret);
        }
        return ret;
}

/*
 * Copy from client-core's address space to the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iov_iter can either contain addresses (which may be
 * kernel-space or user-space addresses) or pointers to struct pages.
 */
static int postcopy_buffers(int buffer_index,
                            struct iov_iter *iter,
                            size_t total_size)
{
        int ret = 0;
        /*
         * copy data to application/kernel by pushing it out to
         * the iovec. NOTE: target buffers can be addresses or
         * struct page pointers.
         */
        if (total_size) {
                ret = orangefs_bufmap_copy_to_iovec(iter,
                                                    buffer_index,
                                                    total_size);
                if (ret < 0)
                        gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
                                   __func__,
                                   (long)ret);
        }
        return ret;
}

/*
 * Post and wait for the I/O upcall to finish
 */
static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
                                  loff_t *offset, struct iov_iter *iter,
                                  size_t total_size, loff_t readahead_size)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
        struct orangefs_kernel_op_s *new_op = NULL;
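        /* Save the iterator state so a restarted write can re-copy its data. */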
        struct iov_iter saved = *iter;
        int buffer_index = -1;
        ssize_t ret;

        new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
        if (!new_op)
                return -ENOMEM;

        /* synchronous I/O */
        new_op->upcall.req.io.readahead_size = readahead_size;
        new_op->upcall.req.io.io_type = type;
        new_op->upcall.req.io.refn = orangefs_inode->refn;

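        /*
         * We come back to this label if the client-core is restarted while
         * the op is outstanding: the shared buffer slot must be reacquired
         * and, for writes, refilled from the saved iterator.
         */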
populate_shared_memory:
        /* get a shared buffer index */
        buffer_index = orangefs_bufmap_get();
        if (buffer_index < 0) {
                ret = buffer_index;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s: orangefs_bufmap_get failure (%zd)\n",
                             __func__, ret);
                goto out;
        }
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): GET op %p -> buffer_index %d\n",
                     __func__,
                     handle,
                     new_op,
                     buffer_index);

        new_op->uses_shared_memory = 1;
        new_op->upcall.req.io.buf_index = buffer_index;
        new_op->upcall.req.io.count = total_size;
        new_op->upcall.req.io.offset = *offset;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): offset: %llu total_size: %zd\n",
                     __func__,
                     handle,
                     llu(*offset),
                     total_size);
        /*
         * Stage 1: copy the buffers into client-core's address space.
         * precopy_buffers only pertains to writes.
         */
        if (type == ORANGEFS_IO_WRITE) {
                ret = precopy_buffers(buffer_index,
                                      iter,
                                      total_size);
                if (ret < 0)
                        goto out;
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Calling post_io_request with tag (%llu)\n",
                     __func__,
                     handle,
                     llu(new_op->tag));

        /* Stage 2: Service the I/O operation */
        ret = service_operation(new_op,
                                type == ORANGEFS_IO_WRITE ?
                                        "file_write" :
                                        "file_read",
                                get_interruptible_flag(inode));

        /*
         * If service_operation() returns -EAGAIN #and# the operation was
         * purged from orangefs_request_list or htable_ops_in_progress, then
         * we know that the client was restarted, causing the shared memory
         * area to be wiped clean. To restart a write operation in this
         * case, we must re-copy the data from the user's iovec to a NEW
         * shared memory location. To restart a read operation, we must get
         * a new shared memory location.
         */
        if (ret == -EAGAIN && op_state_purged(new_op)) {
                orangefs_bufmap_put(buffer_index);
                buffer_index = -1;
                if (type == ORANGEFS_IO_WRITE)
                        *iter = saved;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s: going to repopulate_shared_memory.\n",
                             __func__);
                goto populate_shared_memory;
        }

        if (ret < 0) {
                if (ret == -EINTR) {
                        /*
                         * We can't return EINTR if any data was written,
                         * it's not POSIX. It is minimally acceptable
                         * to give a partial write, the way NFS does.
                         *
                         * It would be optimal to return all or nothing,
                         * but if a userspace write is bigger than
                         * an IO buffer, and the interrupt occurs
                         * between buffer writes, that would not be
                         * possible.
                         */
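                        /*
                         * op_state is expected to carry the
                         * OP_VFS_STATE_GIVEN_UP flag here; subtracting it
                         * recovers the state the op was in when it was
                         * given up.
                         */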
                        switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) {
                        /*
                         * If the op was waiting when the interrupt
                         * occurred, then the client-core did not
                         * trigger the write.
                         */
                        case OP_VFS_STATE_WAITING:
                                if (*offset == 0)
                                        ret = -EINTR;
                                else
                                        ret = 0;
                                break;
                        /*
                         * If the op was in progress when the interrupt
                         * occurred, then the client-core was able to
                         * trigger the write.
                         */
                        case OP_VFS_STATE_INPROGR:
                                ret = total_size;
                                break;
                        default:
                                gossip_err("%s: unexpected op state :%d:.\n",
                                           __func__,
                                           new_op->op_state);
                                ret = 0;
                                break;
                        }
                        gossip_debug(GOSSIP_FILE_DEBUG,
                                     "%s: got EINTR, state:%d: %p\n",
                                     __func__,
                                     new_op->op_state,
                                     new_op);
                } else {
                        gossip_err("%s: error in %s handle %pU, returning %zd\n",
                                   __func__,
                                   type == ORANGEFS_IO_READ ?
                                           "read from" : "write to",
                                   handle, ret);
                }
                if (orangefs_cancel_op_in_progress(new_op))
                        return ret;

                goto out;
        }

        /*
         * Stage 3: Post copy buffers from client-core's address space.
         * postcopy_buffers only pertains to reads.
         */
        if (type == ORANGEFS_IO_READ) {
                ret = postcopy_buffers(buffer_index,
                                       iter,
                                       new_op->downcall.resp.io.amt_complete);
                if (ret < 0)
                        goto out;
        }
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Amount %s, returned by the sys-io call:%d\n",
                     __func__,
                     handle,
                     type == ORANGEFS_IO_READ ? "read" : "written",
                     (int)new_op->downcall.resp.io.amt_complete);

        ret = new_op->downcall.resp.io.amt_complete;

out:
        if (buffer_index >= 0) {
                orangefs_bufmap_put(buffer_index);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): PUT buffer_index %d\n",
                             __func__, handle, buffer_index);
                buffer_index = -1;
        }
        op_release(new_op);
        return ret;
}

/*
 * Common entry point for read/write/readv/writev.
 * This function dispatches to either the direct I/O or buffered I/O path
 * depending on the mount options and/or augmented/extended metadata
 * attached to the file.
 * Note: File extended attributes override any mount options.
 */
static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
                               loff_t *offset, struct iov_iter *iter)
{
        struct inode *inode = file->f_mapping->host;
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
        size_t count = iov_iter_count(iter);
        ssize_t total_count = 0;
        ssize_t ret = -EINVAL;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
                     __func__,
                     handle,
                     (int)count);

        if (type == ORANGEFS_IO_WRITE) {
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): proceeding with offset : %llu, "
                             "size %d\n",
                             __func__,
                             handle,
                             llu(*offset),
                             (int)count);
        }

        if (count == 0) {
                ret = 0;
                goto out;
        }

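        /*
         * Split the request into chunks no larger than the shared bufmap
         * buffer and issue one I/O upcall per chunk.
         */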
        while (iov_iter_count(iter)) {
                size_t each_count = iov_iter_count(iter);
                size_t amt_complete;

                /* how much to transfer in this loop iteration */
                if (each_count > orangefs_bufmap_size_query())
                        each_count = orangefs_bufmap_size_query();

                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): size of each_count(%d)\n",
                             __func__,
                             handle,
                             (int)each_count);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): BEFORE wait_for_io: offset is %d\n",
                             __func__,
                             handle,
                             (int)*offset);

                ret = wait_for_direct_io(type, inode, offset, iter,
                                         each_count, 0);
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): return from wait_for_io:%d\n",
                             __func__,
                             handle,
                             (int)ret);

                if (ret < 0)
                        goto out;

                *offset += ret;
                total_count += ret;
                amt_complete = ret;

                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s(%pU): AFTER wait_for_io: offset is %d\n",
                             __func__,
                             handle,
                             (int)*offset);

                /*
                 * if we got a short I/O operation,
                 * fall out and return what we got so far
                 */
                if (amt_complete < each_count)
                        break;
        } /* end while */

out:
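        /* A partial transfer still counts as success; report what was moved. */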
        if (total_count > 0)
                ret = total_count;
        if (ret > 0) {
                if (type == ORANGEFS_IO_READ) {
                        file_accessed(file);
                } else {
                        SetMtimeFlag(orangefs_inode);
                        inode->i_mtime = CURRENT_TIME;
                        mark_inode_dirty_sync(inode);
                }
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Value(%d) returned.\n",
                     __func__,
                     handle,
                     (int)ret);

        return ret;
}

/*
 * Read data from a specified offset in a file (referenced by inode).
 * Data may be placed either in a user or kernel buffer.
 */
ssize_t orangefs_inode_read(struct inode *inode,
                            struct iov_iter *iter,
                            loff_t *offset,
                            loff_t readahead_size)
{
        struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
        size_t count = iov_iter_count(iter);
        size_t bufmap_size;
        ssize_t ret = -EINVAL;

        g_orangefs_stats.reads++;

        bufmap_size = orangefs_bufmap_size_query();
        if (count > bufmap_size) {
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "%s: count is too large (%zd/%zd)!\n",
                             __func__, count, bufmap_size);
                return -EINVAL;
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU) %zd@%llu\n",
                     __func__,
                     &orangefs_inode->refn.khandle,
                     count,
                     llu(*offset));

        ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
                                 count, readahead_size);
        if (ret > 0)
                *offset += ret;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "%s(%pU): Value(%zd) returned.\n",
                     __func__,
                     &orangefs_inode->refn.khandle,
                     ret);

        return ret;
}

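/*
 * Read iterator entry point for the VFS; drives the chunked direct I/O
 * path above via do_readv_writev().
 */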
static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        loff_t pos = iocb->ki_pos;
        ssize_t rc = 0;

        BUG_ON(iocb->private);

        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");

        g_orangefs_stats.reads++;

        rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
        iocb->ki_pos = pos;

        return rc;
}

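/*
 * Write iterator entry point for the VFS; serializes writers with the
 * inode lock and revalidates the size before O_APPEND writes.
 */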
static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        loff_t pos;
        ssize_t rc;

        BUG_ON(iocb->private);

        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");

        inode_lock(file->f_mapping->host);

        /* Make sure generic_write_checks sees an up to date inode size. */
        if (file->f_flags & O_APPEND) {
                rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1);
                if (rc == -ESTALE)
                        rc = -EIO;
                if (rc) {
                        gossip_err("%s: orangefs_inode_getattr failed, "
                                   "rc:%zd:.\n", __func__, rc);
                        goto out;
                }
        }

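        /* Extend our cached inode size if the file position is already past it. */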
        if (file->f_pos > i_size_read(file->f_mapping->host))
                orangefs_i_size_write(file->f_mapping->host, file->f_pos);

        rc = generic_write_checks(iocb, iter);

        if (rc <= 0) {
                gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
                           __func__, rc);
                goto out;
        }

        /*
         * if we are appending, generic_write_checks would have updated
         * pos to the end of the file, so we will wait till now to set
         * pos...
         */
        pos = iocb->ki_pos;

        rc = do_readv_writev(ORANGEFS_IO_WRITE,
                             file,
                             &pos,
                             iter);
        if (rc < 0) {
                gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
                           __func__, rc);
                goto out;
        }

        iocb->ki_pos = pos;
        g_orangefs_stats.writes++;

out:

        inode_unlock(file->f_mapping->host);
        return rc;
}

/*
 * Perform a miscellaneous operation on a file.
 */
static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int ret = -ENOTTY;
        __u64 val = 0;
        unsigned long uval;

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_ioctl: called with cmd %d\n",
                     cmd);

        /*
         * we understand some general ioctls on files, such as the immutable
         * and append flags
         */
        if (cmd == FS_IOC_GETFLAGS) {
                val = 0;
                ret = orangefs_inode_getxattr(file_inode(file),
                                              "user.pvfs2.meta_hint",
                                              &val, sizeof(val));
                if (ret < 0 && ret != -ENODATA)
                        return ret;
                else if (ret == -ENODATA)
                        val = 0;
                uval = val;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
                             (unsigned long long)uval);
                return put_user(uval, (int __user *)arg);
        } else if (cmd == FS_IOC_SETFLAGS) {
                ret = 0;
                if (get_user(uval, (int __user *)arg))
                        return -EFAULT;
                /*
                 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
                 * is turned on for a file. The user is not allowed to turn
                 * on this bit, but the bit is present if the user first gets
                 * the flags and then updates the flags with some new
                 * settings. So, we ignore it in the following edit. bligon.
                 */
                if ((uval & ~ORANGEFS_MIRROR_FL) &
                    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
                        gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
                        return -EINVAL;
                }
                val = uval;
                gossip_debug(GOSSIP_FILE_DEBUG,
                             "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
                             (unsigned long long)val);
                ret = orangefs_inode_setxattr(file_inode(file),
                                              "user.pvfs2.meta_hint",
                                              &val, sizeof(val), 0);
        }

        return ret;
}

/*
 * Memory map a region of a file.
 */
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_mmap: called on %s\n",
                     (file ?
                      (char *)file->f_path.dentry->d_name.name :
                      (char *)"Unknown"));

        /* set the sequential readahead hint */
        vma->vm_flags |= VM_SEQ_READ;
        vma->vm_flags &= ~VM_RAND_READ;

        /* Use readonly mmap since we cannot support writable maps. */
        return generic_file_readonly_mmap(file, vma);
}

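/* Number of pages an address_space currently holds in the page cache. */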
#define mapping_nrpages(idata) ((idata)->nrpages)

/*
 * Called to notify the module that there are no more references to
 * this file (i.e. no processes have it open).
 *
 * \note Not called on every close, only when the final reference
 * to the file is dropped.
 */
static int orangefs_file_release(struct inode *inode, struct file *file)
{
        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_release: called on %pD\n",
                     file);

        orangefs_flush_inode(inode);

        /*
         * remove all associated inode pages from the page cache and
         * readahead cache (if any); this forces an expensive refresh of
         * data for the next caller of mmap (or 'get_block' accesses)
         */
        if (file->f_path.dentry->d_inode &&
            file->f_path.dentry->d_inode->i_mapping &&
            mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {
                gossip_debug(GOSSIP_INODE_DEBUG,
                             "calling flush_racache on %pU\n",
                             get_khandle_from_ino(inode));
                flush_racache(inode);
                gossip_debug(GOSSIP_INODE_DEBUG, "flush_racache finished\n");
                truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
                                     0);
        }
        return 0;
}

/*
 * Push all data for a specific file onto permanent storage.
 */
static int orangefs_fsync(struct file *file,
                          loff_t start,
                          loff_t end,
                          int datasync)
{
        int ret = -EINVAL;
        struct orangefs_inode_s *orangefs_inode =
                ORANGEFS_I(file->f_path.dentry->d_inode);
        struct orangefs_kernel_op_s *new_op = NULL;

        /* required call */
        filemap_write_and_wait_range(file->f_mapping, start, end);

        new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
        if (!new_op)
                return -ENOMEM;
        new_op->upcall.req.fsync.refn = orangefs_inode->refn;

        ret = service_operation(new_op,
                                "orangefs_fsync",
                                get_interruptible_flag(file->f_path.dentry->d_inode));

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_fsync got return value of %d\n",
                     ret);

        op_release(new_op);

        orangefs_flush_inode(file->f_path.dentry->d_inode);
        return ret;
}

/*
 * Change the file pointer position for an instance of an open file.
 *
 * \note If .llseek is overridden, we must acquire lock as described in
 * Documentation/filesystems/Locking.
 *
 * A future upgrade could support SEEK_DATA and SEEK_HOLE, but that would
 * require substantial changes to the FS.
 */
static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
{
        int ret = -EINVAL;
        struct inode *inode = file_inode(file);

        if (origin == SEEK_END) {
                /*
                 * revalidate the inode's file size.
                 * NOTE: We are only interested in file size here,
                 * so we set mask accordingly.
                 */
                ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1);
                if (ret == -ESTALE)
                        ret = -EIO;
                if (ret) {
                        gossip_debug(GOSSIP_FILE_DEBUG,
                                     "%s:%s:%d calling make bad inode\n",
                                     __FILE__,
                                     __func__,
                                     __LINE__);
                        return ret;
                }
        }

        gossip_debug(GOSSIP_FILE_DEBUG,
                     "orangefs_file_llseek: offset is %ld | origin is %d"
                     " | inode size is %lu\n",
                     (long)offset,
                     origin,
                     (unsigned long)i_size_read(inode));

        return generic_file_llseek(file, offset, origin);
}

/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        int rc = -EINVAL;

        if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
                if (cmd == F_GETLK) {
                        rc = 0;
                        posix_test_lock(filp, fl);
                } else {
                        rc = posix_lock_file(filp, fl, NULL);
                }
        }

        return rc;
}

/** ORANGEFS implementation of VFS file operations */
const struct file_operations orangefs_file_operations = {
        .llseek         = orangefs_file_llseek,
        .read_iter      = orangefs_file_read_iter,
        .write_iter     = orangefs_file_write_iter,
        .lock           = orangefs_lock,
        .unlocked_ioctl = orangefs_ioctl,
        .mmap           = orangefs_file_mmap,
        .open           = generic_file_open,
        .release        = orangefs_file_release,
        .fsync          = orangefs_fsync,
};