2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/pagemap.h>
16 #include <linux/uio.h>
17 #include <linux/blkdev.h>
19 #include <linux/smp_lock.h>
21 #include <linux/gfs2_ondisk.h>
22 #include <linux/ext2_fs.h>
23 #include <linux/crc32.h>
24 #include <linux/iflags.h>
25 #include <asm/semaphore.h>
26 #include <asm/uaccess.h>
29 #include "lm_interface.h"
47 /* "bad" is for NFS support */
/*
 * One buffered directory entry, used when readdir is driven by knfsd
 * (see readdir_bad()): entries are staged here, then replayed through
 * the real filldir callback.
 * NOTE(review): this view is garbled (original line numbers fused into
 * the text) and some field lines (e.g. fbe_name, fbe_offset, which
 * filldir_bad_func() assigns) are missing; code left byte-identical.
 */
48 struct filldir_bad_entry
{
/* length of the entry name copied into fbe_name */
50 unsigned int fbe_length
;
/* inode number the entry points to */
52 struct gfs2_inum fbe_inum
;
/* DT_* type of the inode the entry points to */
53 unsigned int fbe_type
;
/*
 * Fields of struct filldir_bad (its header line is not visible here):
 * the superblock, the staged-entry array and cursors into it, and the
 * shared name buffer with its size/offset cursors.
 */
57 struct gfs2_sbd
*fdb_sbd
;
59 struct filldir_bad_entry
*fdb_entry
;
60 unsigned int fdb_entry_num
;
61 unsigned int fdb_entry_off
;
64 unsigned int fdb_name_size
;
65 unsigned int fdb_name_off
;
68 /* For regular, non-NFS */
/*
 * Fields of struct filldir_reg (header line not visible): superblock,
 * and the caller's filldir callback; fdr_prefetch/fdr_opaque are also
 * assigned elsewhere in this file but their declarations are not shown.
 */
70 struct gfs2_sbd
*fdr_sbd
;
73 filldir_t fdr_filldir
;
/*
 * Sentinel struct file passed to do_generic_mapping_read() by
 * gfs2_internal_read() below, so readpage() can recognise internal reads.
 */
78 * Most fields left uninitialised to catch anybody who tries to
79 * use them. f_flags set to prevent file_accessed() from touching
80 * any other part of this. Its use is purely as a flag so that we
81 * know (in readpage()) whether or not to do locking.
83 struct file gfs2_internal_file_sentinal
= {
84 .f_flags
= O_NOATIME
|O_RDONLY
,
/* NOTE(review): the closing "};" is not visible in this view */
/*
 * gfs2_read_actor - read_descriptor actor: copy up to `size` bytes from
 * the page (via `kaddr` + offset, presumably from a kmap not visible in
 * this view — TODO confirm) into desc->arg.buf, then advance the
 * descriptor's count/written/buf.
 * NOTE(review): the size-clamping, kmap/kunmap and return lines
 * (orig. 89-96, 98-99, 103-105) are missing here; code left byte-identical.
 */
87 static int gfs2_read_actor(read_descriptor_t
*desc
, struct page
*page
,
88 unsigned long offset
, unsigned long size
)
91 unsigned long count
= desc
->count
;
97 memcpy(desc
->arg
.buf
, kaddr
+ offset
, size
);
100 desc
->count
= count
- size
;
101 desc
->written
+= size
;
102 desc
->arg
.buf
+= size
;
/*
 * gfs2_internal_read - read from an inode for GFS2's own internal use,
 * bypassing normal file locking by passing the sentinel file above to
 * do_generic_mapping_read(). Returns bytes read, or the descriptor's
 * error if nothing was written.
 * NOTE(review): the read_descriptor initialisation lines (orig. 111-114)
 * and the actor argument of the call are missing from this view;
 * presumably desc is set up with buf/size and gfs2_read_actor is the
 * actor — TODO confirm. Code left byte-identical.
 */
106 int gfs2_internal_read(struct gfs2_inode
*ip
, struct file_ra_state
*ra_state
,
107 char *buf
, loff_t
*pos
, unsigned size
)
109 struct inode
*inode
= ip
->i_vnode
;
110 read_descriptor_t desc
;
115 do_generic_mapping_read(inode
->i_mapping
, ra_state
,
116 &gfs2_internal_file_sentinal
, pos
, &desc
,
118 return desc
.written
? desc
.written
: desc
.error
;
122 * gfs2_llseek - seek to a location in a file
124 * @offset: the offset
125 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
127 * SEEK_END requires the glock for the file because it references the
130 * Returns: The new offset, or errno
/*
 * NOTE(review): the branch structure is missing from this view; per the
 * comment above, the first remote_llseek() is presumably under a
 * SEEK_END (origin == 2) test with the shared glock held, and the second
 * handles the other origins without locking — TODO confirm. Code left
 * byte-identical.
 */
133 static loff_t
gfs2_llseek(struct file
*file
, loff_t offset
, int origin
)
135 struct gfs2_inode
*ip
= file
->f_mapping
->host
->u
.generic_ip
;
136 struct gfs2_holder i_gh
;
/* acquire a shared glock so i_size is valid for SEEK_END */
140 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_SHARED
, LM_FLAG_ANY
,
143 error
= remote_llseek(file
, offset
, origin
);
144 gfs2_glock_dq_uninit(&i_gh
);
/* non-SEEK_END path: no glock needed */
147 error
= remote_llseek(file
, offset
, origin
);
/*
 * gfs2_direct_IO_read - O_DIRECT read helper: flush and wait on dirty
 * pagecache for the mapping, then hand the iovec to the address_space's
 * direct_IO method for READ.
 * NOTE(review): the error check after filemap_write_and_wait(), the
 * trailing direct_IO arguments (nr_segs) and the return are not visible
 * in this view; code left byte-identical.
 */
153 static ssize_t
gfs2_direct_IO_read(struct kiocb
*iocb
, const struct iovec
*iov
,
154 loff_t offset
, unsigned long nr_segs
)
156 struct file
*file
= iocb
->ki_filp
;
157 struct address_space
*mapping
= file
->f_mapping
;
160 retval
= filemap_write_and_wait(mapping
);
162 retval
= mapping
->a_ops
->direct_IO(READ
, iocb
, iov
, offset
,
169 * __gfs2_file_aio_read - The main GFS2 read function
171 * N.B. This is almost, but not quite the same as __generic_file_aio_read()
172 * the important subtle difference being that inode->i_size isn't valid
173 * unless we are holding a lock, and we do this _only_ on the O_DIRECT
174 * path since otherwise locking is done entirely at the page cache
/*
 * NOTE(review): many interior lines are missing from this view (the
 * declarations of count/retval/seg/inode, the returns under the iovec
 * validation, the error/EOF checks on the O_DIRECT path, and the
 * "fallback_to_normal"/"out" labels). Code left byte-identical.
 */
177 static ssize_t
__gfs2_file_aio_read(struct kiocb
*iocb
,
178 const struct iovec
*iov
,
179 unsigned long nr_segs
, loff_t
*ppos
)
181 struct file
*filp
= iocb
->ki_filp
;
182 struct gfs2_inode
*ip
= filp
->f_mapping
->host
->u
.generic_ip
;
183 struct gfs2_holder gh
;
/* validate the iovec array and total byte count */
189 for (seg
= 0; seg
< nr_segs
; seg
++) {
190 const struct iovec
*iv
= &iov
[seg
];
193 * If any segment has a negative length, or the cumulative
194 * length ever wraps negative then return -EINVAL.
196 count
+= iv
->iov_len
;
197 if (unlikely((ssize_t
)(count
|iv
->iov_len
) < 0))
199 if (access_ok(VERIFY_WRITE
, iv
->iov_base
, iv
->iov_len
))
204 count
-= iv
->iov_len
; /* This segment is no good */
208 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
209 if (filp
->f_flags
& O_DIRECT
) {
210 loff_t pos
= *ppos
, size
;
211 struct address_space
*mapping
;
214 mapping
= filp
->f_mapping
;
215 inode
= mapping
->host
;
218 goto out
; /* skip atime */
/* take the shared glock (atime variant) so i_size is trustworthy */
220 gfs2_holder_init(ip
->i_gl
, LM_ST_SHARED
, GL_ATIME
, &gh
);
221 retval
= gfs2_glock_nq_m_atime(1, &gh
);
/* stuffed (in-dinode) files can't do direct I/O: drop the lock
 * and fall back to the buffered path below */
224 if (gfs2_is_stuffed(ip
)) {
225 gfs2_glock_dq_m(1, &gh
);
226 gfs2_holder_uninit(&gh
);
227 goto fallback_to_normal
;
229 size
= i_size_read(inode
);
231 retval
= gfs2_direct_IO_read(iocb
, iov
, pos
, nr_segs
);
232 if (retval
> 0 && !is_sync_kiocb(iocb
))
233 retval
= -EIOCBQUEUED
;
235 *ppos
= pos
+ retval
;
238 gfs2_glock_dq_m(1, &gh
);
239 gfs2_holder_uninit(&gh
);
/* buffered path: one read_descriptor per iovec segment */
246 for (seg
= 0; seg
< nr_segs
; seg
++) {
247 read_descriptor_t desc
;
250 desc
.arg
.buf
= iov
[seg
].iov_base
;
251 desc
.count
= iov
[seg
].iov_len
;
255 do_generic_file_read(filp
,ppos
,&desc
,file_read_actor
);
256 retval
+= desc
.written
;
/* report the descriptor error only if nothing was read */
258 retval
= retval
?: desc
.error
;
268 * gfs2_read - Read bytes from a file
269 * @file: The file to read from
270 * @buf: The buffer to copy into
271 * @size: The amount of data requested
272 * @offset: The current file offset
274 * Outputs: Offset - updated according to number of bytes read
276 * Returns: The number of bytes read, errno on failure
279 static ssize_t
gfs2_read(struct file
*filp
, char __user
*buf
, size_t size
,
282 struct iovec local_iov
= { .iov_base
= buf
, .iov_len
= size
};
286 init_sync_kiocb(&kiocb
, filp
);
287 ret
= __gfs2_file_aio_read(&kiocb
, &local_iov
, 1, offset
);
288 if (-EIOCBQUEUED
== ret
)
289 ret
= wait_on_sync_kiocb(&kiocb
);
293 static ssize_t
gfs2_file_readv(struct file
*filp
, const struct iovec
*iov
,
294 unsigned long nr_segs
, loff_t
*ppos
)
299 init_sync_kiocb(&kiocb
, filp
);
300 ret
= __gfs2_file_aio_read(&kiocb
, iov
, nr_segs
, ppos
);
301 if (-EIOCBQUEUED
== ret
)
302 ret
= wait_on_sync_kiocb(&kiocb
);
306 static ssize_t
gfs2_file_aio_read(struct kiocb
*iocb
, char __user
*buf
,
307 size_t count
, loff_t pos
)
309 struct iovec local_iov
= { .iov_base
= buf
, .iov_len
= count
};
311 BUG_ON(iocb
->ki_pos
!= pos
);
312 return __gfs2_file_aio_read(iocb
, &local_iov
, 1, &iocb
->ki_pos
);
317 * filldir_reg_func - Report a directory entry to the caller of gfs2_dir_read()
318 * @opaque: opaque data used by the function
319 * @name: the name of the directory entry
320 * @length: the length of the name
321 * @offset: the entry's offset in the directory
322 * @inum: the inode number the entry points to
323 * @type: the type of inode the entry points to
325 * Returns: 0 on success, 1 if buffer full
/*
 * NOTE(review): the trailing signature line (type parameter), the error
 * check after the filldir call and the final return are missing from
 * this view; code left byte-identical.
 */
328 static int filldir_reg_func(void *opaque
, const char *name
, unsigned int length
,
329 uint64_t offset
, struct gfs2_inum
*inum
,
332 struct filldir_reg
*fdr
= (struct filldir_reg
*)opaque
;
333 struct gfs2_sbd
*sdp
= fdr
->fdr_sbd
;
/* forward the entry to the real filldir callback */
336 error
= fdr
->fdr_filldir(fdr
->fdr_opaque
, name
, length
, offset
,
337 inum
->no_formal_ino
, type
);
/* opportunistically prefetch the entry's glocks (skip "." entries) */
341 if (fdr
->fdr_prefetch
&& !(length
== 1 && *name
== '.')) {
342 gfs2_glock_prefetch_num(sdp
,
343 inum
->no_addr
, &gfs2_inode_glops
,
344 LM_ST_SHARED
, LM_FLAG_TRY
| LM_FLAG_ANY
);
345 gfs2_glock_prefetch_num(sdp
,
346 inum
->no_addr
, &gfs2_iopen_glops
,
347 LM_ST_SHARED
, LM_FLAG_TRY
);
354 * readdir_reg - Read directory entries from a directory
355 * @file: The directory to read from
356 * @dirent: Buffer for dirents
357 * @filldir: Function used to do the copying
/*
 * Regular (non-NFS) readdir path: take the directory glock (atime
 * variant), walk the directory with gfs2_dir_read() feeding entries to
 * filldir_reg_func, then update f_pos.
 * NOTE(review): error-handling lines between the visible statements
 * (e.g. the check after gfs2_glock_nq_atime and the final return) are
 * missing from this view; code left byte-identical.
 */
362 static int readdir_reg(struct file
*file
, void *dirent
, filldir_t filldir
)
364 struct inode
*dir
= file
->f_mapping
->host
;
365 struct gfs2_inode
*dip
= dir
->u
.generic_ip
;
366 struct filldir_reg fdr
;
367 struct gfs2_holder d_gh
;
368 uint64_t offset
= file
->f_pos
;
/* package up the caller's filldir for filldir_reg_func */
371 fdr
.fdr_sbd
= dip
->i_sbd
;
372 fdr
.fdr_prefetch
= 1;
373 fdr
.fdr_filldir
= filldir
;
374 fdr
.fdr_opaque
= dirent
;
376 gfs2_holder_init(dip
->i_gl
, LM_ST_SHARED
, GL_ATIME
, &d_gh
);
377 error
= gfs2_glock_nq_atime(&d_gh
);
/* presumably only on error — TODO confirm */
379 gfs2_holder_uninit(&d_gh
);
383 error
= gfs2_dir_read(dir
, &offset
, &fdr
, filldir_reg_func
);
385 gfs2_glock_dq_uninit(&d_gh
);
387 file
->f_pos
= offset
;
393 * filldir_bad_func - Report a directory entry to the caller of gfs2_dir_read()
394 * @opaque: opaque data used by the function
395 * @name: the name of the directory entry
396 * @length: the length of the name
397 * @offset: the entry's offset in the directory
398 * @inum: the inode number the entry points to
399 * @type: the type of inode the entry points to
401 * For supporting NFS.
403 * Returns: 0 on success, 1 if buffer full
/*
 * Buffers one entry into the filldir_bad staging area instead of calling
 * the real filldir directly (entries are replayed later by readdir_bad()).
 * Returns 1 when the entry or name buffers are full.
 * NOTE(review): the trailing signature line (type parameter), the
 * "return 1" under the full-buffer test and the final return are
 * missing from this view; code left byte-identical.
 */
406 static int filldir_bad_func(void *opaque
, const char *name
, unsigned int length
,
407 uint64_t offset
, struct gfs2_inum
*inum
,
410 struct filldir_bad
*fdb
= (struct filldir_bad
*)opaque
;
411 struct gfs2_sbd
*sdp
= fdb
->fdb_sbd
;
412 struct filldir_bad_entry
*fbe
;
/* stop when either the entry array or the name buffer is exhausted */
414 if (fdb
->fdb_entry_off
== fdb
->fdb_entry_num
||
415 fdb
->fdb_name_off
+ length
> fdb
->fdb_name_size
)
/* stage the entry: name bytes go into the shared name buffer */
418 fbe
= &fdb
->fdb_entry
[fdb
->fdb_entry_off
];
419 fbe
->fbe_name
= fdb
->fdb_name
+ fdb
->fdb_name_off
;
420 memcpy(fbe
->fbe_name
, name
, length
);
421 fbe
->fbe_length
= length
;
422 fbe
->fbe_offset
= offset
;
423 fbe
->fbe_inum
= *inum
;
424 fbe
->fbe_type
= type
;
426 fdb
->fdb_entry_off
++;
427 fdb
->fdb_name_off
+= length
;
/* prefetch glocks for the entry, as in filldir_reg_func (skip ".") */
429 if (!(length
== 1 && *name
== '.')) {
430 gfs2_glock_prefetch_num(sdp
,
431 inum
->no_addr
, &gfs2_inode_glops
,
432 LM_ST_SHARED
, LM_FLAG_TRY
| LM_FLAG_ANY
);
433 gfs2_glock_prefetch_num(sdp
,
434 inum
->no_addr
, &gfs2_iopen_glops
,
435 LM_ST_SHARED
, LM_FLAG_TRY
);
442 * readdir_bad - Read directory entries from a directory
443 * @file: The directory to read from
444 * @dirent: Buffer for dirents
445 * @filldir: Function used to do the copying
447 * For supporting NFS.
/*
 * NFS readdir path: stage entries into a kzalloc'd filldir_bad buffer
 * while the directory glock is held, drop the lock, then replay the
 * staged entries through the caller's filldir via filldir_reg_func.
 * This avoids calling back into knfsd with the glock held.
 * NOTE(review): several error checks, the kfree of fdb and the returns
 * are missing from this view; code left byte-identical.
 */
452 static int readdir_bad(struct file
*file
, void *dirent
, filldir_t filldir
)
454 struct inode
*dir
= file
->f_mapping
->host
;
455 struct gfs2_inode
*dip
= dir
->u
.generic_ip
;
456 struct gfs2_sbd
*sdp
= dip
->i_sbd
;
457 struct filldir_reg fdr
;
458 unsigned int entries
, size
;
459 struct filldir_bad
*fdb
;
460 struct gfs2_holder d_gh
;
461 uint64_t offset
= file
->f_pos
;
463 struct filldir_bad_entry
*fbe
;
/* size the staging buffer from the tunable entries-per-readdir */
466 entries
= gfs2_tune_get(sdp
, gt_entries_per_readdir
);
467 size
= sizeof(struct filldir_bad
) +
468 entries
* (sizeof(struct filldir_bad_entry
) + GFS2_FAST_NAME_SIZE
);
470 fdb
= kzalloc(size
, GFP_KERNEL
);
/* carve the single allocation into header, entry array, name buffer */
475 fdb
->fdb_entry
= (struct filldir_bad_entry
*)(fdb
+ 1);
476 fdb
->fdb_entry_num
= entries
;
477 fdb
->fdb_name
= ((char *)fdb
) + sizeof(struct filldir_bad
) +
478 entries
* sizeof(struct filldir_bad_entry
);
479 fdb
->fdb_name_size
= entries
* GFS2_FAST_NAME_SIZE
;
481 gfs2_holder_init(dip
->i_gl
, LM_ST_SHARED
, GL_ATIME
, &d_gh
);
482 error
= gfs2_glock_nq_atime(&d_gh
);
/* presumably only on error — TODO confirm */
484 gfs2_holder_uninit(&d_gh
);
/* fill the staging buffer under the glock */
488 error
= gfs2_dir_read(dir
, &offset
, fdb
, filldir_bad_func
);
490 gfs2_glock_dq_uninit(&d_gh
);
/* replay staged entries to the real filldir, lock-free */
493 fdr
.fdr_prefetch
= 0;
494 fdr
.fdr_filldir
= filldir
;
495 fdr
.fdr_opaque
= dirent
;
497 for (x
= 0; x
< fdb
->fdb_entry_off
; x
++) {
498 fbe
= &fdb
->fdb_entry
[x
];
500 error
= filldir_reg_func(&fdr
,
501 fbe
->fbe_name
, fbe
->fbe_length
,
503 &fbe
->fbe_inum
, fbe
->fbe_type
);
/* presumably on early termination, resume at this entry — TODO confirm */
505 file
->f_pos
= fbe
->fbe_offset
;
511 file
->f_pos
= offset
;
520 * gfs2_readdir - Read directory entries from a directory
521 * @file: The directory to read from
522 * @dirent: Buffer for dirents
523 * @filldir: Function used to do the copying
528 static int gfs2_readdir(struct file
*file
, void *dirent
, filldir_t filldir
)
532 if (strcmp(current
->comm
, "nfsd") != 0)
533 error
= readdir_reg(file
, dirent
, filldir
);
535 error
= readdir_bad(file
, dirent
, filldir
);
540 static const u32 iflags_to_gfs2
[32] = {
541 [iflag_Sync
] = GFS2_DIF_SYNC
,
542 [iflag_Immutable
] = GFS2_DIF_IMMUTABLE
,
543 [iflag_Append
] = GFS2_DIF_APPENDONLY
,
544 [iflag_NoAtime
] = GFS2_DIF_NOATIME
,
545 [iflag_Index
] = GFS2_DIF_EXHASH
,
546 [iflag_JournalData
] = GFS2_DIF_JDATA
,
547 [iflag_DirectIO
] = GFS2_DIF_DIRECTIO
,
548 [iflag_InheritDirectIO
] = GFS2_DIF_INHERIT_DIRECTIO
,
549 [iflag_InheritJdata
] = GFS2_DIF_INHERIT_JDATA
,
552 static const u32 gfs2_to_iflags
[32] = {
553 [gfs2fl_Sync
] = IFLAG_SYNC
,
554 [gfs2fl_Immutable
] = IFLAG_IMMUTABLE
,
555 [gfs2fl_AppendOnly
] = IFLAG_APPEND
,
556 [gfs2fl_NoAtime
] = IFLAG_NOATIME
,
557 [gfs2fl_ExHash
] = IFLAG_INDEX
,
558 [gfs2fl_Jdata
] = IFLAG_JOURNAL_DATA
,
559 [gfs2fl_Directio
] = IFLAG_DIRECTIO
,
560 [gfs2fl_InheritDirectio
] = IFLAG_INHERITDIRECTIO
,
561 [gfs2fl_InheritJdata
] = IFLAG_INHERITJDATA
,
/*
 * gfs2_get_flags - ioctl helper: read the inode's dinode flags under a
 * shared glock, convert them to generic IFLAG_* form via gfs2_to_iflags,
 * and copy the result to the user pointer.
 * NOTE(review): the error declaration/checks, the -EFAULT path after
 * put_user and the final return are missing from this view; code left
 * byte-identical.
 */
564 static int gfs2_get_flags(struct inode
*inode
, u32 __user
*ptr
)
566 struct gfs2_inode
*ip
= inode
->u
.generic_ip
;
567 struct gfs2_holder gh
;
571 gfs2_holder_init(ip
->i_gl
, LM_ST_SHARED
, GL_ATIME
, &gh
);
572 error
= gfs2_glock_nq_m_atime(1, &gh
);
576 iflags
= iflags_cvt(gfs2_to_iflags
, ip
->i_di
.di_flags
);
577 if (put_user(iflags
, ptr
))
580 gfs2_glock_dq_m(1, &gh
);
581 gfs2_holder_uninit(&gh
);
585 /* Flags that can be set by user space */
/* NOTE(review): macro continuation lines (orig. 587, 590-592) are missing
 * from this view — the full set presumably also includes SYNC/NOATIME/
 * DIRECTIO bits per do_gfs2_set_flags() below — TODO confirm. */
586 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
588 GFS2_DIF_IMMUTABLE| \
589 GFS2_DIF_APPENDONLY| \
593 GFS2_DIF_INHERIT_DIRECTIO| \
594 GFS2_DIF_INHERIT_JDATA)
597 * gfs2_set_flags - set flags on an inode
599 * @flags: The flags to set
600 * @mask: Indicates which flags are valid
/*
 * Validates and applies flag changes under an exclusive glock: rejects
 * changes outside GFS2_FLAGS_USER_SET, per-file-type restrictions,
 * immutable/append-only violations, and write-permission failures;
 * then writes the new flags into the dinode inside a transaction.
 * NOTE(review): error checks, returns and the transaction begin/end and
 * brelse lines are missing from this view; code left byte-identical.
 */
603 static int do_gfs2_set_flags(struct inode
*inode
, u32 flags
, u32 mask
)
605 struct gfs2_inode
*ip
= inode
->u
.generic_ip
;
606 struct buffer_head
*bh
;
607 struct gfs2_holder gh
;
/* NOTE(review): suspected bug — gfs2_holder_init() immediately followed
 * by gfs2_glock_nq_init() on the same holder; gfs2_glock_nq_init()
 * performs its own holder initialisation, so the first call looks like a
 * redundant leftover that double-initialises gh — TODO confirm and drop
 * one of the two. */
611 gfs2_holder_init(ip
->i_gl
, LM_ST_EXCLUSIVE
, 0, &gh
);
612 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_EXCLUSIVE
, 0, &gh
);
/* merge requested bits into the current on-disk flags */
616 new_flags
= (ip
->i_di
.di_flags
& ~mask
) | (flags
& mask
);
/* no effective change: nothing to do */
617 if ((new_flags
^ flags
) == 0)
/* reject any change to bits user space may not set */
621 if ((new_flags
^ flags
) & ~GFS2_FLAGS_USER_SET
)
/* per-type restrictions on which bits may change */
624 if (S_ISDIR(inode
->i_mode
)) {
625 if ((new_flags
^ flags
) & (GFS2_DIF_JDATA
| GFS2_DIF_DIRECTIO
))
627 } else if (S_ISREG(inode
->i_mode
)) {
628 if ((new_flags
^ flags
) & (GFS2_DIF_INHERIT_DIRECTIO
|
629 GFS2_DIF_INHERIT_JDATA
))
635 if (IS_IMMUTABLE(inode
) && (new_flags
& GFS2_DIF_IMMUTABLE
))
637 if (IS_APPEND(inode
) && (new_flags
& GFS2_DIF_APPENDONLY
))
639 error
= gfs2_repermission(inode
, MAY_WRITE
, NULL
);
/* write the updated dinode under a transaction */
643 error
= gfs2_meta_inode_buffer(ip
, &bh
);
646 gfs2_trans_add_bh(ip
->i_gl
, bh
, 1);
647 ip
->i_di
.di_flags
= new_flags
;
648 gfs2_dinode_out(&ip
->i_di
, bh
->b_data
);
651 gfs2_glock_dq_uninit(&gh
);
655 static int gfs2_set_flags(struct inode
*inode
, u32 __user
*ptr
)
657 u32 iflags
, gfsflags
;
658 if (get_user(iflags
, ptr
))
660 gfsflags
= iflags_cvt(iflags_to_gfs2
, iflags
);
661 return do_gfs2_set_flags(inode
, gfsflags
, ~0);
/*
 * gfs2_ioctl - ioctl dispatcher: routes get-flags and set-flags commands
 * to gfs2_get_flags()/gfs2_set_flags().
 * NOTE(review): the trailing signature line (the arg parameter), the
 * switch/case lines and the default return are missing from this view;
 * code left byte-identical.
 */
664 int gfs2_ioctl(struct inode
*inode
, struct file
*filp
, unsigned int cmd
,
669 return gfs2_get_flags(inode
, (u32 __user
*)arg
);
671 return gfs2_set_flags(inode
, (u32 __user
*)arg
);
679 * @file: The file to map
680 * @vma: The VMA which described the mapping
682 * Returns: 0 or error code
/*
 * gfs2_mmap - mmap entry point: briefly takes the shared glock (atime
 * variant) before installing the vm_ops; shared-writable mappings get
 * gfs2_vm_ops_sharewrite, everything else gfs2_vm_ops_private.
 * NOTE(review): the error checks after gfs2_glock_nq_atime and the
 * final return are missing from this view; code left byte-identical.
 */
685 static int gfs2_mmap(struct file
*file
, struct vm_area_struct
*vma
)
687 struct gfs2_inode
*ip
= file
->f_mapping
->host
->u
.generic_ip
;
688 struct gfs2_holder i_gh
;
691 gfs2_holder_init(ip
->i_gl
, LM_ST_SHARED
, GL_ATIME
, &i_gh
);
692 error
= gfs2_glock_nq_atime(&i_gh
);
/* presumably only on error — TODO confirm */
694 gfs2_holder_uninit(&i_gh
);
698 /* This is VM_MAYWRITE instead of VM_WRITE because a call
699 to mprotect() can turn on VM_WRITE later. */
701 if ((vma
->vm_flags
& (VM_MAYSHARE
| VM_MAYWRITE
)) ==
702 (VM_MAYSHARE
| VM_MAYWRITE
))
703 vma
->vm_ops
= &gfs2_vm_ops_sharewrite
;
705 vma
->vm_ops
= &gfs2_vm_ops_private
;
707 gfs2_glock_dq_uninit(&i_gh
);
713 * gfs2_open - open a file
714 * @inode: the inode to open
715 * @file: the struct file for this opening
/*
 * Allocates the per-open struct gfs2_file, stashes it in
 * file->private_data, and for regular files takes a shared glock to
 * check O_LARGEFILE against the on-disk size and to propagate the
 * on-disk DIRECTIO flag into f_flags.
 * NOTE(review): allocation failure handling, the fp->f_inode assignment,
 * several error checks/returns and the error-unwind labels are missing
 * from this view; code left byte-identical.
 */
720 static int gfs2_open(struct inode
*inode
, struct file
*file
)
722 struct gfs2_inode
*ip
= inode
->u
.generic_ip
;
723 struct gfs2_holder i_gh
;
724 struct gfs2_file
*fp
;
727 fp
= kzalloc(sizeof(struct gfs2_file
), GFP_KERNEL
);
731 mutex_init(&fp
->f_fl_mutex
);
/* private_data must not already be set for this file */
736 gfs2_assert_warn(ip
->i_sbd
, !file
->private_data
);
737 file
->private_data
= fp
;
739 if (S_ISREG(ip
->i_di
.di_mode
)) {
740 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_SHARED
, LM_FLAG_ANY
,
/* reject large files opened without O_LARGEFILE */
745 if (!(file
->f_flags
& O_LARGEFILE
) &&
746 ip
->i_di
.di_size
> MAX_NON_LFS
) {
751 /* Listen to the Direct I/O flag */
753 if (ip
->i_di
.di_flags
& GFS2_DIF_DIRECTIO
)
754 file
->f_flags
|= O_DIRECT
;
756 gfs2_glock_dq_uninit(&i_gh
);
/* error-unwind path — releases the glock and the private data */
762 gfs2_glock_dq_uninit(&i_gh
);
765 file
->private_data
= NULL
;
772 * gfs2_close - called to close a struct file
773 * @inode: the inode the struct file belongs to
774 * @file: the struct file being closed
/*
 * Detaches and releases the per-open struct gfs2_file.
 * NOTE(review): the kfree(fp) and the returns are missing from this
 * view; code left byte-identical.
 */
779 static int gfs2_close(struct inode
*inode
, struct file
*file
)
781 struct gfs2_sbd
*sdp
= inode
->i_sb
->s_fs_info
;
782 struct gfs2_file
*fp
;
784 fp
= file
->private_data
;
785 file
->private_data
= NULL
;
/* a NULL fp here indicates an open/close imbalance */
787 if (gfs2_assert_warn(sdp
, fp
))
796 * gfs2_fsync - sync the dirty data for a file (across the cluster)
797 * @file: the file that points to the dentry (we ignore this)
798 * @dentry: the dentry that points to the inode to sync
803 static int gfs2_fsync(struct file
*file
, struct dentry
*dentry
, int datasync
)
805 struct gfs2_inode
*ip
= dentry
->d_inode
->u
.generic_ip
;
807 gfs2_log_flush_glock(ip
->i_gl
);
813 * gfs2_lock - acquire/release a posix lock on a file
814 * @file: the file pointer
815 * @cmd: either modify or retrieve lock state, possibly wait
816 * @fl: type and range of lock
/*
 * POSIX lock entry point: mandatory-lock mode bits (setgid without
 * group-exec) are rejected; with the localflocks mount option the lock
 * is handled locally via posix_test_lock()/posix_lock_file_wait(),
 * otherwise it is forwarded to the cluster lock module
 * (plock_get / punlock / plock).
 * NOTE(review): the -ENOLCK/-ENOSYS style returns, the IS_GETLK branch
 * structure and the closing of the localflocks block are missing from
 * this view; code left byte-identical.
 */
821 static int gfs2_lock(struct file
*file
, int cmd
, struct file_lock
*fl
)
823 struct gfs2_inode
*ip
= file
->f_mapping
->host
->u
.generic_ip
;
824 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
/* cluster lock name: plock keyed on the inode's block address */
825 struct lm_lockname name
=
826 { .ln_number
= ip
->i_num
.no_addr
,
827 .ln_type
= LM_TYPE_PLOCK
};
829 if (!(fl
->fl_flags
& FL_POSIX
))
/* mandatory locking (setgid, no group exec) is not supported */
831 if ((ip
->i_di
.di_mode
& (S_ISGID
| S_IXGRP
)) == S_ISGID
)
834 if (sdp
->sd_args
.ar_localflocks
) {
836 struct file_lock
*tmp
;
/* old-style posix_test_lock: returns the conflicting lock or NULL */
838 tmp
= posix_test_lock(file
, fl
);
839 fl
->fl_type
= F_UNLCK
;
841 memcpy(fl
, tmp
, sizeof(struct file_lock
));
847 error
= posix_lock_file_wait(file
, fl
);
/* clustered path: forward to the lock module */
854 return gfs2_lm_plock_get(sdp
, &name
, file
, fl
);
855 else if (fl
->fl_type
== F_UNLCK
)
856 return gfs2_lm_punlock(sdp
, &name
, file
, fl
);
858 return gfs2_lm_plock(sdp
, &name
, file
, cmd
, fl
);
862 * gfs2_sendfile - Send bytes to a file or socket
863 * @in_file: The file to read from
864 * @out_file: The file to write to
865 * @count: The amount of data
866 * @offset: The beginning file offset
868 * Outputs: offset - updated according to number of bytes read
870 * Returns: The number of bytes sent, errno on failure
873 static ssize_t
gfs2_sendfile(struct file
*in_file
, loff_t
*offset
, size_t count
,
874 read_actor_t actor
, void *target
)
876 return generic_file_sendfile(in_file
, offset
, count
, actor
, target
);
/*
 * do_flock - acquire (or convert) a flock lock backed by a per-inode
 * flock glock. Under f_fl_mutex: if a glock is already held at the
 * requested state we are done; otherwise any existing flock/glock is
 * dropped, the flock glock is (re)acquired at the new state, and the
 * local VFS flock state is updated.
 * NOTE(review): several error checks, the gh_gl already-held test, the
 * glock_put and the out-label structure are missing from this view;
 * code left byte-identical.
 */
879 static int do_flock(struct file
*file
, int cmd
, struct file_lock
*fl
)
881 struct gfs2_file
*fp
= file
->private_data
;
882 struct gfs2_holder
*fl_gh
= &fp
->f_fl_gh
;
883 struct gfs2_inode
*ip
= fp
->f_inode
;
884 struct gfs2_glock
*gl
;
/* LOCK_EX maps to an exclusive glock, LOCK_SH to shared; non-blocking
 * requests use LM_FLAG_TRY */
889 state
= (fl
->fl_type
== F_WRLCK
) ? LM_ST_EXCLUSIVE
: LM_ST_SHARED
;
890 flags
= ((IS_SETLKW(cmd
)) ? 0 : LM_FLAG_TRY
) | GL_EXACT
| GL_NOCACHE
;
892 mutex_lock(&fp
->f_fl_mutex
);
/* already holding the glock at the requested state: nothing to do */
896 if (fl_gh
->gh_state
== state
)
/* drop the old local flock and glock before converting */
899 flock_lock_file_wait(file
,
900 &(struct file_lock
){.fl_type
= F_UNLCK
});
901 gfs2_glock_dq_uninit(fl_gh
);
903 error
= gfs2_glock_get(ip
->i_sbd
,
904 ip
->i_num
.no_addr
, &gfs2_flock_glops
,
910 gfs2_holder_init(gl
, state
, flags
, fl_gh
);
913 error
= gfs2_glock_nq(fl_gh
);
/* presumably only on enqueue failure — TODO confirm */
915 gfs2_holder_uninit(fl_gh
);
/* a failed TRY request surfaces as EAGAIN to the caller — TODO confirm */
916 if (error
== GLR_TRYFAILED
)
919 error
= flock_lock_file_wait(file
, fl
);
920 gfs2_assert_warn(ip
->i_sbd
, !error
);
924 mutex_unlock(&fp
->f_fl_mutex
);
/*
 * do_unflock - release a flock lock: drop the local VFS flock state and
 * release the flock glock holder, serialised by f_fl_mutex.
 * NOTE(review): the line before gfs2_glock_dq_uninit (orig. 936) is
 * missing from this view — presumably a guard such as
 * "if (fl_gh->gh_gl)" so the holder is only released when a glock is
 * actually held — TODO confirm. Code left byte-identical.
 */
929 static void do_unflock(struct file
*file
, struct file_lock
*fl
)
931 struct gfs2_file
*fp
= file
->private_data
;
932 struct gfs2_holder
*fl_gh
= &fp
->f_fl_gh
;
934 mutex_lock(&fp
->f_fl_mutex
);
935 flock_lock_file_wait(file
, fl
);
937 gfs2_glock_dq_uninit(fl_gh
);
938 mutex_unlock(&fp
->f_fl_mutex
);
942 * gfs2_flock - acquire/release a flock lock on a file
943 * @file: the file pointer
944 * @cmd: either modify or retrieve lock state, possibly wait
945 * @fl: type and range of lock
/*
 * flock entry point: rejects non-FL_FLOCK requests and mandatory-lock
 * mode bits; with the localflocks mount option the lock is handled by
 * the VFS alone, otherwise unlocks go to do_unflock() and lock/convert
 * requests to do_flock().
 * NOTE(review): the error returns for the two rejection tests and the
 * return after do_unflock are missing from this view; code left
 * byte-identical.
 */
950 static int gfs2_flock(struct file
*file
, int cmd
, struct file_lock
*fl
)
952 struct gfs2_inode
*ip
= file
->f_mapping
->host
->u
.generic_ip
;
953 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
955 if (!(fl
->fl_flags
& FL_FLOCK
))
/* mandatory locking (setgid, no group exec) is not supported */
957 if ((ip
->i_di
.di_mode
& (S_ISGID
| S_IXGRP
)) == S_ISGID
)
960 if (sdp
->sd_args
.ar_localflocks
)
961 return flock_lock_file_wait(file
, fl
);
963 if (fl
->fl_type
== F_UNLCK
) {
964 do_unflock(file
, fl
);
967 return do_flock(file
, cmd
, fl
);
/*
 * File operations for regular GFS2 files.
 * NOTE(review): several entries (e.g. .read, .ioctl, .mmap, .open,
 * .fsync, .lock, .flock at orig. lines 972, 978-980, 982-983, 985) and
 * the closing "};" are missing from this view; code left byte-identical.
 */
970 struct file_operations gfs2_file_fops
= {
971 .llseek
= gfs2_llseek
,
973 .readv
= gfs2_file_readv
,
974 .aio_read
= gfs2_file_aio_read
,
975 .write
= generic_file_write
,
976 .writev
= generic_file_writev
,
977 .aio_write
= generic_file_aio_write
,
981 .release
= gfs2_close
,
984 .sendfile
= gfs2_sendfile
,
988 struct file_operations gfs2_dir_fops
= {
989 .readdir
= gfs2_readdir
,
992 .release
= gfs2_close
,