CIFS: directio read/write cleanups
[deliverable/linux.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
608712fe 63static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 64{
608712fe 65 u32 posix_flags = 0;
e10f7b55 66
7fc8f4e9 67 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 68 posix_flags = SMB_O_RDONLY;
7fc8f4e9 69 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
73
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 81 if (flags & O_DSYNC)
608712fe 82 posix_flags |= SMB_O_SYNC;
7fc8f4e9 83 if (flags & O_DIRECTORY)
608712fe 84 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 85 if (flags & O_NOFOLLOW)
608712fe 86 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 87 if (flags & O_DIRECT)
608712fe 88 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
89
90 return posix_flags;
1da177e4
LT
91}
92
93static inline int cifs_get_disposition(unsigned int flags)
94{
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
55aa2e09
SF
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
1da177e4
LT
103 else
104 return FILE_OPEN;
105}
106
/*
 * Open a file using the CIFS POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path: server-relative path of the file to open
 * @pinode:    if non-NULL, on success either filled with a new inode
 *             (*pinode == NULL on entry) or refreshed from the response
 * @sb:        superblock of the mount
 * @mode:      create mode; the caller's umask is applied here
 * @f_flags:   POSIX open flags, converted via cifs_posix_convert_flags()
 * @poplock:   out: oplock granted by the server
 * @pnetfid:   out: network file handle
 * @xid:       transaction id for this operation
 *
 * Returns 0 on success or a negative errno. On success with
 * presp_data->Type == -1 the caller is expected to do a qpathinfo itself.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		struct super_block *sb, int mode, unsigned int f_flags,
		__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference no longer needed once the wire call is done */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: refresh its attributes from the response */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
168
/*
 * Open a file the traditional (non-POSIX-extensions) way, via either
 * CIFSSMBOpen (NT SMBs) or SMBLegacyOpen, then refresh the inode info.
 *
 * @full_path: server-relative path
 * @inode:     inode being opened; its info is refreshed on success
 * @cifs_sb:   per-superblock CIFS data
 * @tcon:      tree connection to use
 * @f_flags:   POSIX open flags (mapped to access/disposition below)
 * @poplock:   out: oplock granted
 * @pnetfid:   out: network file handle
 * @xid:       transaction id
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* legacy (pre-NT) servers need the older open SMB */
	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh inode metadata using the info returned by the open */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
238
/*
 * Allocate and initialize a cifsFileInfo for an opened file, link it onto
 * the tcon and inode open-file lists, and attach it to file->private_data.
 *
 * @fileHandle: network file handle returned by the open
 * @file:       VFS file being opened
 * @tlink:      tcon link; a reference is taken via cifs_get_tlink()
 * @oplock:     oplock level granted by the server
 *
 * Returns the new cifsFileInfo (refcount 1) or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	/* (find_readable_file relies on write-only entries being last) */
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
279
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops: the entry is unlinked from the inode and
 * tcon open-file lists, the handle is closed on the server (unless a
 * reconnect is pending or the handle is already invalid), any stored byte
 * range lock records are freed, and the tlink/dentry references are dropped.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other users remain; nothing else to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* close on the wire outside the spinlock; skip if the handle is
	   already dead or the session needs reconnecting anyway */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
339
/*
 * VFS ->open for regular files. Tries a POSIX-extensions open first when
 * the server supports it (and it is not known broken), otherwise falls
 * back to the traditional NT/legacy open path in cifs_nt_open().
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims POSIX path ops but the open failed
			   unexpectedly: disable posix open for this tcon */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* undo the server-side open we can no longer track */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
				       pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
442
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
/* BB list all locks open on this file and relock */

	/* nothing is actually relocked yet; report success */
	return 0;
}
453
/*
 * Reopen a file whose handle was invalidated (e.g. after a session
 * reconnect). Tries a POSIX-extensions reopen first, then falls back to
 * CIFSSMBOpen. The fh_mutex serializes handle re-establishment.
 *
 * @pCifsFile: open file instance whose netfid needs refreshing
 * @can_flush: true if it is safe to write out dirty pages and re-query
 *             inode info; false when called from a writeback path where
 *             flushing here could deadlock
 *
 * Returns 0 on success (or if the handle was already valid) or a
 * negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* another task already reopened it while we waited */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
	     inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
575
576int cifs_close(struct inode *inode, struct file *file)
577{
77970693
JL
578 if (file->private_data != NULL) {
579 cifsFileInfo_put(file->private_data);
580 file->private_data = NULL;
581 }
7ee1af76 582
cdff08e7
SF
583 /* return code from the ->release op is always ignored */
584 return 0;
1da177e4
LT
585}
586
/*
 * VFS ->release for directories: close any uncompleted search on the
 * server, free the cached network search buffer, and release the private
 * data attached at opendir time.
 *
 * Returns 0 (server-side close failures are deliberately ignored).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid before dropping the lock so no one
			   else tries to use the dying handle */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* buffer came from one of two pools; release to the
			   matching one */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
631
7ee1af76
JA
632static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
633 __u64 offset, __u8 lockType)
634{
fb8c4b14
SF
635 struct cifsLockInfo *li =
636 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
637 if (li == NULL)
638 return -ENOMEM;
639 li->offset = offset;
640 li->length = len;
641 li->type = lockType;
796e5661 642 mutex_lock(&fid->lock_mutex);
7ee1af76 643 list_add(&li->llist, &fid->llist);
796e5661 644 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
645 return 0;
646}
647
/*
 * VFS ->lock implementation. Handles both lock queries (F_GETLK) and
 * lock/unlock requests, using the POSIX-extensions lock call when the
 * server supports it (and posix brlocks are not disabled by mount
 * option), otherwise Windows-style LOCKING_ANDX requests. Windows-style
 * locks that are granted are remembered via store_file_lock() so that a
 * later unlock can release them.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	/* translate the VFS lock type into lock/unlock counts and the
	   shared-vs-exclusive bit of the SMB lock type */
	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		/* probe: try to take the lock; if it succeeds no conflicting
		   lock exists, so immediately undo it and report F_UNLCK */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					  "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				/* exclusive probe failed; retry shared to
				   distinguish a read lock from a write lock */
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						/* only drop the record once
						   the server unlock worked */
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
848
fbec9ab9 849/* update the file size (if needed) after a write */
72432ffc 850void
fbec9ab9
JL
851cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
852 unsigned int bytes_written)
853{
854 loff_t end_of_write = offset + bytes_written;
855
856 if (end_of_write > cifsi->server_eof)
857 cifsi->server_eof = end_of_write;
858}
859
/*
 * Write @write_size bytes from @write_data to the server at *@poffset,
 * in chunks of at most cifs_sb->wsize, reopening the file handle and
 * retrying on -EAGAIN. On success *poffset is advanced by the number of
 * bytes written and the local inode size is updated if the write extended
 * the file.
 *
 * Returns the total number of bytes written, or a negative errno if
 * nothing was written at all.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
					   *poffset, &bytes_written, iov, 1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;	/* report the partial write */
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
932
/*
 * Find an open file instance on this inode that is readable and has a
 * valid handle, take a reference on it, and return it. Returns NULL if
 * none is found. With @fsuid_only set (honored only on multiuser
 * mounts), only instances opened by the current fsuid are considered.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 966
6508d904
JL
967struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
968 bool fsuid_only)
6148a742
SF
969{
970 struct cifsFileInfo *open_file;
d3892294 971 struct cifs_sb_info *cifs_sb;
2846d386 972 bool any_available = false;
dd99cd80 973 int rc;
6148a742 974
60808233
SF
975 /* Having a null inode here (because mapping->host was set to zero by
976 the VFS or MM) should not happen but we had reports of on oops (due to
977 it being zero) during stress testcases so we need to check for it */
978
fb8c4b14 979 if (cifs_inode == NULL) {
b6b38f70 980 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
981 dump_stack();
982 return NULL;
983 }
984
d3892294
JL
985 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
986
6508d904
JL
987 /* only filter by fsuid on multiuser mounts */
988 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
989 fsuid_only = false;
990
4477288a 991 spin_lock(&cifs_file_list_lock);
9b22b0b7 992refind_writable:
6148a742 993 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
994 if (!any_available && open_file->pid != current->tgid)
995 continue;
996 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 997 continue;
2e396b83 998 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 999 cifsFileInfo_get(open_file);
9b22b0b7
SF
1000
1001 if (!open_file->invalidHandle) {
1002 /* found a good writable file */
4477288a 1003 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1004 return open_file;
1005 }
8840dee9 1006
4477288a 1007 spin_unlock(&cifs_file_list_lock);
cdff08e7 1008
9b22b0b7 1009 /* Had to unlock since following call can block */
15886177 1010 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1011 if (!rc)
1012 return open_file;
9b22b0b7 1013
cdff08e7 1014 /* if it fails, try another handle if possible */
b6b38f70 1015 cFYI(1, "wp failed on reopen file");
6ab409b5 1016 cifsFileInfo_put(open_file);
8840dee9 1017
cdff08e7
SF
1018 spin_lock(&cifs_file_list_lock);
1019
9b22b0b7
SF
1020 /* else we simply continue to the next entry. Thus
1021 we do not loop on reopen errors. If we
1022 can not reopen the file, for example if we
1023 reconnected to a server with another client
1024 racing to delete or lock the file we would not
1025 make progress if we restarted before the beginning
1026 of the loop here. */
6148a742
SF
1027 }
1028 }
2846d386
JL
1029 /* couldn't find useable FH with same pid, try any available */
1030 if (!any_available) {
1031 any_available = true;
1032 goto refind_writable;
1033 }
4477288a 1034 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1035 return NULL;
1036}
1037
1da177e4
LT
1038static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1039{
1040 struct address_space *mapping = page->mapping;
1041 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1042 char *write_data;
1043 int rc = -EFAULT;
1044 int bytes_written = 0;
1da177e4 1045 struct inode *inode;
6148a742 1046 struct cifsFileInfo *open_file;
1da177e4
LT
1047
1048 if (!mapping || !mapping->host)
1049 return -EFAULT;
1050
1051 inode = page->mapping->host;
1da177e4
LT
1052
1053 offset += (loff_t)from;
1054 write_data = kmap(page);
1055 write_data += from;
1056
1057 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1058 kunmap(page);
1059 return -EIO;
1060 }
1061
1062 /* racing with truncate? */
1063 if (offset > mapping->host->i_size) {
1064 kunmap(page);
1065 return 0; /* don't care */
1066 }
1067
1068 /* check to make sure that we are not extending the file */
1069 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1070 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1071
6508d904 1072 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1073 if (open_file) {
7da4b49a
JL
1074 bytes_written = cifs_write(open_file, write_data,
1075 to - from, &offset);
6ab409b5 1076 cifsFileInfo_put(open_file);
1da177e4 1077 /* Does mm or vfs already set times? */
6148a742 1078 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1079 if ((bytes_written > 0) && (offset))
6148a742 1080 rc = 0;
bb5a9a04
SF
1081 else if (bytes_written < 0)
1082 rc = bytes_written;
6148a742 1083 } else {
b6b38f70 1084 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1085 rc = -EIO;
1086 }
1087
1088 kunmap(page);
1089 return rc;
1090}
1091
1da177e4 1092static int cifs_writepages(struct address_space *mapping,
37c0eb46 1093 struct writeback_control *wbc)
1da177e4 1094{
37c0eb46
SF
1095 unsigned int bytes_to_write;
1096 unsigned int bytes_written;
1097 struct cifs_sb_info *cifs_sb;
1098 int done = 0;
111ebb6e 1099 pgoff_t end;
37c0eb46 1100 pgoff_t index;
fb8c4b14
SF
1101 int range_whole = 0;
1102 struct kvec *iov;
84d2f07e 1103 int len;
37c0eb46
SF
1104 int n_iov = 0;
1105 pgoff_t next;
1106 int nr_pages;
1107 __u64 offset = 0;
23e7dd7d 1108 struct cifsFileInfo *open_file;
ba00ba64 1109 struct cifsTconInfo *tcon;
fbec9ab9 1110 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1111 struct page *page;
1112 struct pagevec pvec;
1113 int rc = 0;
1114 int scanned = 0;
7749981e 1115 int xid;
1da177e4 1116
37c0eb46 1117 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1118
37c0eb46
SF
1119 /*
1120 * If wsize is smaller that the page cache size, default to writing
1121 * one page at a time via cifs_writepage
1122 */
1123 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1124 return generic_writepages(mapping, wbc);
1125
9a0c8230 1126 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1127 if (iov == NULL)
9a0c8230
SF
1128 return generic_writepages(mapping, wbc);
1129
37c0eb46 1130 /*
f3983c21
JL
1131 * if there's no open file, then this is likely to fail too,
1132 * but it'll at least handle the return. Maybe it should be
1133 * a BUG() instead?
37c0eb46 1134 */
6508d904 1135 open_file = find_writable_file(CIFS_I(mapping->host), false);
f3983c21 1136 if (!open_file) {
9a0c8230 1137 kfree(iov);
f3983c21
JL
1138 return generic_writepages(mapping, wbc);
1139 }
1140
13cfb733 1141 tcon = tlink_tcon(open_file->tlink);
f3983c21 1142 cifsFileInfo_put(open_file);
37c0eb46 1143
1da177e4
LT
1144 xid = GetXid();
1145
37c0eb46 1146 pagevec_init(&pvec, 0);
111ebb6e 1147 if (wbc->range_cyclic) {
37c0eb46 1148 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1149 end = -1;
1150 } else {
1151 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1152 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1153 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1154 range_whole = 1;
37c0eb46
SF
1155 scanned = 1;
1156 }
1157retry:
1158 while (!done && (index <= end) &&
1159 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1160 PAGECACHE_TAG_DIRTY,
1161 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1162 int first;
1163 unsigned int i;
1164
37c0eb46
SF
1165 first = -1;
1166 next = 0;
1167 n_iov = 0;
1168 bytes_to_write = 0;
1169
1170 for (i = 0; i < nr_pages; i++) {
1171 page = pvec.pages[i];
1172 /*
1173 * At this point we hold neither mapping->tree_lock nor
1174 * lock on the page itself: the page may be truncated or
1175 * invalidated (changing page->mapping to NULL), or even
1176 * swizzled back from swapper_space to tmpfs file
1177 * mapping
1178 */
1179
1180 if (first < 0)
1181 lock_page(page);
529ae9aa 1182 else if (!trylock_page(page))
37c0eb46
SF
1183 break;
1184
1185 if (unlikely(page->mapping != mapping)) {
1186 unlock_page(page);
1187 break;
1188 }
1189
111ebb6e 1190 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1191 done = 1;
1192 unlock_page(page);
1193 break;
1194 }
1195
1196 if (next && (page->index != next)) {
1197 /* Not next consecutive page */
1198 unlock_page(page);
1199 break;
1200 }
1201
1202 if (wbc->sync_mode != WB_SYNC_NONE)
1203 wait_on_page_writeback(page);
1204
1205 if (PageWriteback(page) ||
cb876f45 1206 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1207 unlock_page(page);
1208 break;
1209 }
84d2f07e 1210
cb876f45
LT
1211 /*
1212 * This actually clears the dirty bit in the radix tree.
1213 * See cifs_writepage() for more commentary.
1214 */
1215 set_page_writeback(page);
1216
84d2f07e
SF
1217 if (page_offset(page) >= mapping->host->i_size) {
1218 done = 1;
1219 unlock_page(page);
cb876f45 1220 end_page_writeback(page);
84d2f07e
SF
1221 break;
1222 }
1223
37c0eb46
SF
1224 /*
1225 * BB can we get rid of this? pages are held by pvec
1226 */
1227 page_cache_get(page);
1228
84d2f07e
SF
1229 len = min(mapping->host->i_size - page_offset(page),
1230 (loff_t)PAGE_CACHE_SIZE);
1231
37c0eb46
SF
1232 /* reserve iov[0] for the smb header */
1233 n_iov++;
1234 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1235 iov[n_iov].iov_len = len;
1236 bytes_to_write += len;
37c0eb46
SF
1237
1238 if (first < 0) {
1239 first = i;
1240 offset = page_offset(page);
1241 }
1242 next = page->index + 1;
1243 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1244 break;
1245 }
1246 if (n_iov) {
941b853d 1247retry_write:
6508d904
JL
1248 open_file = find_writable_file(CIFS_I(mapping->host),
1249 false);
23e7dd7d 1250 if (!open_file) {
b6b38f70 1251 cERROR(1, "No writable handles for inode");
23e7dd7d 1252 rc = -EBADF;
1047abc1 1253 } else {
f3983c21 1254 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1255 bytes_to_write, offset,
1256 &bytes_written, iov, n_iov,
7749981e 1257 0);
6ab409b5 1258 cifsFileInfo_put(open_file);
f3983c21 1259 }
fbec9ab9 1260
941b853d
JL
1261 cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
1262
1263 /*
1264 * For now, treat a short write as if nothing got
1265 * written. A zero length write however indicates
1266 * ENOSPC or EFBIG. We have no way to know which
1267 * though, so call it ENOSPC for now. EFBIG would
1268 * get translated to AS_EIO anyway.
1269 *
1270 * FIXME: make it take into account the data that did
1271 * get written
1272 */
1273 if (rc == 0) {
1274 if (bytes_written == 0)
1275 rc = -ENOSPC;
1276 else if (bytes_written < bytes_to_write)
1277 rc = -EAGAIN;
1278 }
1279
1280 /* retry on data-integrity flush */
1281 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
1282 goto retry_write;
1283
1284 /* fix the stats and EOF */
1285 if (bytes_written > 0) {
f3983c21 1286 cifs_stats_bytes_written(tcon, bytes_written);
941b853d 1287 cifs_update_eof(cifsi, offset, bytes_written);
37c0eb46 1288 }
f3983c21 1289
37c0eb46
SF
1290 for (i = 0; i < n_iov; i++) {
1291 page = pvec.pages[first + i];
941b853d
JL
1292 /* on retryable write error, redirty page */
1293 if (rc == -EAGAIN)
1294 redirty_page_for_writepage(wbc, page);
1295 else if (rc != 0)
eb9bdaa3 1296 SetPageError(page);
37c0eb46
SF
1297 kunmap(page);
1298 unlock_page(page);
cb876f45 1299 end_page_writeback(page);
37c0eb46
SF
1300 page_cache_release(page);
1301 }
941b853d
JL
1302
1303 if (rc != -EAGAIN)
1304 mapping_set_error(mapping, rc);
1305 else
1306 rc = 0;
1307
37c0eb46
SF
1308 if ((wbc->nr_to_write -= n_iov) <= 0)
1309 done = 1;
1310 index = next;
b066a48c
DK
1311 } else
1312 /* Need to re-find the pages we skipped */
1313 index = pvec.pages[0]->index + 1;
1314
37c0eb46
SF
1315 pagevec_release(&pvec);
1316 }
1317 if (!scanned && !done) {
1318 /*
1319 * We hit the last page and there is more work to be done: wrap
1320 * back to the start of the file
1321 */
1322 scanned = 1;
1323 index = 0;
1324 goto retry;
1325 }
111ebb6e 1326 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1327 mapping->writeback_index = index;
1328
1da177e4 1329 FreeXid(xid);
9a0c8230 1330 kfree(iov);
1da177e4
LT
1331 return rc;
1332}
1da177e4 1333
9ad1506b
PS
1334static int
1335cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1336{
9ad1506b 1337 int rc;
1da177e4
LT
1338 int xid;
1339
1340 xid = GetXid();
1341/* BB add check for wbc flags */
1342 page_cache_get(page);
ad7a2926 1343 if (!PageUptodate(page))
b6b38f70 1344 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1345
1346 /*
1347 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1348 *
1349 * A writepage() implementation always needs to do either this,
1350 * or re-dirty the page with "redirty_page_for_writepage()" in
1351 * the case of a failure.
1352 *
1353 * Just unlocking the page will cause the radix tree tag-bits
1354 * to fail to update with the state of the page correctly.
1355 */
fb8c4b14 1356 set_page_writeback(page);
9ad1506b 1357retry_write:
1da177e4 1358 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1359 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1360 goto retry_write;
1361 else if (rc == -EAGAIN)
1362 redirty_page_for_writepage(wbc, page);
1363 else if (rc != 0)
1364 SetPageError(page);
1365 else
1366 SetPageUptodate(page);
cb876f45
LT
1367 end_page_writeback(page);
1368 page_cache_release(page);
1da177e4
LT
1369 FreeXid(xid);
1370 return rc;
1371}
1372
9ad1506b
PS
/* ->writepage entry point: write the locked page, then drop the lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
1379
d9414774
NP
1380static int cifs_write_end(struct file *file, struct address_space *mapping,
1381 loff_t pos, unsigned len, unsigned copied,
1382 struct page *page, void *fsdata)
1da177e4 1383{
d9414774
NP
1384 int rc;
1385 struct inode *inode = mapping->host;
1da177e4 1386
b6b38f70
JP
1387 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1388 page, pos, copied);
d9414774 1389
a98ee8c1
JL
1390 if (PageChecked(page)) {
1391 if (copied == len)
1392 SetPageUptodate(page);
1393 ClearPageChecked(page);
1394 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1395 SetPageUptodate(page);
ad7a2926 1396
1da177e4 1397 if (!PageUptodate(page)) {
d9414774
NP
1398 char *page_data;
1399 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1400 int xid;
1401
1402 xid = GetXid();
1da177e4
LT
1403 /* this is probably better than directly calling
1404 partialpage_write since in this function the file handle is
1405 known which we might as well leverage */
1406 /* BB check if anything else missing out of ppw
1407 such as updating last write time */
1408 page_data = kmap(page);
7da4b49a
JL
1409 rc = cifs_write(file->private_data, page_data + offset,
1410 copied, &pos);
d9414774 1411 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1412 kunmap(page);
d9414774
NP
1413
1414 FreeXid(xid);
fb8c4b14 1415 } else {
d9414774
NP
1416 rc = copied;
1417 pos += copied;
1da177e4
LT
1418 set_page_dirty(page);
1419 }
1420
d9414774
NP
1421 if (rc > 0) {
1422 spin_lock(&inode->i_lock);
1423 if (pos > inode->i_size)
1424 i_size_write(inode, pos);
1425 spin_unlock(&inode->i_lock);
1426 }
1427
1428 unlock_page(page);
1429 page_cache_release(page);
1430
1da177e4
LT
1431 return rc;
1432}
1433
8be7e6ba 1434int cifs_strict_fsync(struct file *file, int datasync)
1da177e4
LT
1435{
1436 int xid;
1437 int rc = 0;
b298f223 1438 struct cifsTconInfo *tcon;
c21dfb69 1439 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1440 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1441 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4
LT
1442
1443 xid = GetXid();
1444
b6b38f70 1445 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1446 file->f_path.dentry->d_name.name, datasync);
50c2f753 1447
8be7e6ba
PS
1448 if (!CIFS_I(inode)->clientCanCacheRead)
1449 cifs_invalidate_mapping(inode);
eb4b756b 1450
8be7e6ba
PS
1451 tcon = tlink_tcon(smbfile->tlink);
1452 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1453 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1454
1455 FreeXid(xid);
1456 return rc;
1457}
1458
1459int cifs_fsync(struct file *file, int datasync)
1460{
1461 int xid;
1462 int rc = 0;
1463 struct cifsTconInfo *tcon;
1464 struct cifsFileInfo *smbfile = file->private_data;
1465 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1466
1467 xid = GetXid();
1468
1469 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1470 file->f_path.dentry->d_name.name, datasync);
1471
1472 tcon = tlink_tcon(smbfile->tlink);
1473 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1474 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1475
1da177e4
LT
1476 FreeXid(xid);
1477 return rc;
1478}
1479
1da177e4
LT
1480/*
1481 * As file closes, flush all cached write data for this inode checking
1482 * for write behind errors.
1483 */
75e1fcc0 1484int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1485{
fb8c4b14 1486 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1487 int rc = 0;
1488
eb4b756b 1489 if (file->f_mode & FMODE_WRITE)
d3f1322a 1490 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1491
b6b38f70 1492 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1493
1494 return rc;
1495}
1496
72432ffc
PS
1497static int
1498cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1499{
1500 int rc = 0;
1501 unsigned long i;
1502
1503 for (i = 0; i < num_pages; i++) {
1504 pages[i] = alloc_page(__GFP_HIGHMEM);
1505 if (!pages[i]) {
1506 /*
1507 * save number of pages we have already allocated and
1508 * return with ENOMEM error
1509 */
1510 num_pages = i;
1511 rc = -ENOMEM;
1512 goto error;
1513 }
1514 }
1515
1516 return rc;
1517
1518error:
1519 for (i = 0; i < num_pages; i++)
1520 put_page(pages[i]);
1521 return rc;
1522}
1523
1524static inline
1525size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1526{
1527 size_t num_pages;
1528 size_t clen;
1529
1530 clen = min_t(const size_t, len, wsize);
1531 num_pages = clen / PAGE_CACHE_SIZE;
1532 if (clen % PAGE_CACHE_SIZE)
1533 num_pages++;
1534
1535 if (cur_len)
1536 *cur_len = clen;
1537
1538 return num_pages;
1539}
1540
1541static ssize_t
1542cifs_iovec_write(struct file *file, const struct iovec *iov,
1543 unsigned long nr_segs, loff_t *poffset)
1544{
76429c14
PS
1545 unsigned int written;
1546 unsigned long num_pages, npages, i;
1547 size_t copied, len, cur_len;
1548 ssize_t total_written = 0;
72432ffc
PS
1549 struct kvec *to_send;
1550 struct page **pages;
1551 struct iov_iter it;
1552 struct inode *inode;
1553 struct cifsFileInfo *open_file;
1554 struct cifsTconInfo *pTcon;
1555 struct cifs_sb_info *cifs_sb;
1556 int xid, rc;
1557
1558 len = iov_length(iov, nr_segs);
1559 if (!len)
1560 return 0;
1561
1562 rc = generic_write_checks(file, poffset, &len, 0);
1563 if (rc)
1564 return rc;
1565
1566 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1567 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1568
1569 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1570 if (!pages)
1571 return -ENOMEM;
1572
1573 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1574 if (!to_send) {
1575 kfree(pages);
1576 return -ENOMEM;
1577 }
1578
1579 rc = cifs_write_allocate_pages(pages, num_pages);
1580 if (rc) {
1581 kfree(pages);
1582 kfree(to_send);
1583 return rc;
1584 }
1585
1586 xid = GetXid();
1587 open_file = file->private_data;
1588 pTcon = tlink_tcon(open_file->tlink);
1589 inode = file->f_path.dentry->d_inode;
1590
1591 iov_iter_init(&it, iov, nr_segs, len, 0);
1592 npages = num_pages;
1593
1594 do {
1595 size_t save_len = cur_len;
1596 for (i = 0; i < npages; i++) {
1597 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1598 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1599 copied);
1600 cur_len -= copied;
1601 iov_iter_advance(&it, copied);
1602 to_send[i+1].iov_base = kmap(pages[i]);
1603 to_send[i+1].iov_len = copied;
1604 }
1605
1606 cur_len = save_len - cur_len;
1607
1608 do {
1609 if (open_file->invalidHandle) {
1610 rc = cifs_reopen_file(open_file, false);
1611 if (rc != 0)
1612 break;
1613 }
1614 rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
1615 cur_len, *poffset, &written,
1616 to_send, npages, 0);
1617 } while (rc == -EAGAIN);
1618
1619 for (i = 0; i < npages; i++)
1620 kunmap(pages[i]);
1621
1622 if (written) {
1623 len -= written;
1624 total_written += written;
1625 cifs_update_eof(CIFS_I(inode), *poffset, written);
1626 *poffset += written;
1627 } else if (rc < 0) {
1628 if (!total_written)
1629 total_written = rc;
1630 break;
1631 }
1632
1633 /* get length and number of kvecs of the next write */
1634 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1635 } while (len > 0);
1636
1637 if (total_written > 0) {
1638 spin_lock(&inode->i_lock);
1639 if (*poffset > inode->i_size)
1640 i_size_write(inode, *poffset);
1641 spin_unlock(&inode->i_lock);
1642 }
1643
1644 cifs_stats_bytes_written(pTcon, total_written);
1645 mark_inode_dirty_sync(inode);
1646
1647 for (i = 0; i < num_pages; i++)
1648 put_page(pages[i]);
1649 kfree(to_send);
1650 kfree(pages);
1651 FreeXid(xid);
1652 return total_written;
1653}
1654
0b81c1c4 1655ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
1656 unsigned long nr_segs, loff_t pos)
1657{
1658 ssize_t written;
1659 struct inode *inode;
1660
1661 inode = iocb->ki_filp->f_path.dentry->d_inode;
1662
1663 /*
1664 * BB - optimize the way when signing is disabled. We can drop this
1665 * extra memory-to-memory copying and use iovec buffers for constructing
1666 * write request.
1667 */
1668
1669 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1670 if (written > 0) {
1671 CIFS_I(inode)->invalid_mapping = true;
1672 iocb->ki_pos = pos;
1673 }
1674
1675 return written;
1676}
1677
1678ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1679 unsigned long nr_segs, loff_t pos)
1680{
1681 struct inode *inode;
1682
1683 inode = iocb->ki_filp->f_path.dentry->d_inode;
1684
1685 if (CIFS_I(inode)->clientCanCacheAll)
1686 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1687
1688 /*
1689 * In strict cache mode we need to write the data to the server exactly
1690 * from the pos to pos+len-1 rather than flush all affected pages
1691 * because it may cause a error with mandatory locks on these pages but
1692 * not on the region from pos to ppos+len-1.
1693 */
1694
1695 return cifs_user_writev(iocb, iov, nr_segs, pos);
1696}
1697
a70307ee
PS
1698static ssize_t
1699cifs_iovec_read(struct file *file, const struct iovec *iov,
1700 unsigned long nr_segs, loff_t *poffset)
1da177e4 1701{
a70307ee
PS
1702 int rc;
1703 int xid;
76429c14
PS
1704 ssize_t total_read;
1705 unsigned int bytes_read = 0;
a70307ee
PS
1706 size_t len, cur_len;
1707 int iov_offset = 0;
1da177e4
LT
1708 struct cifs_sb_info *cifs_sb;
1709 struct cifsTconInfo *pTcon;
1da177e4 1710 struct cifsFileInfo *open_file;
1da177e4 1711 struct smb_com_read_rsp *pSMBr;
a70307ee
PS
1712 char *read_data;
1713
1714 if (!nr_segs)
1715 return 0;
1716
1717 len = iov_length(iov, nr_segs);
1718 if (!len)
1719 return 0;
1da177e4
LT
1720
1721 xid = GetXid();
e6a00296 1722 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1723
c21dfb69 1724 open_file = file->private_data;
13cfb733 1725 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1726
ad7a2926 1727 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1728 cFYI(1, "attempting read on write only file instance");
ad7a2926 1729
a70307ee
PS
1730 for (total_read = 0; total_read < len; total_read += bytes_read) {
1731 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1da177e4 1732 rc = -EAGAIN;
a70307ee
PS
1733 read_data = NULL;
1734
1da177e4 1735 while (rc == -EAGAIN) {
ec637e3f 1736 int buf_type = CIFS_NO_BUFFER;
cdff08e7 1737 if (open_file->invalidHandle) {
15886177 1738 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1739 if (rc != 0)
1740 break;
1741 }
a70307ee
PS
1742 rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
1743 cur_len, *poffset, &bytes_read,
1744 &read_data, &buf_type);
1745 pSMBr = (struct smb_com_read_rsp *)read_data;
1746 if (read_data) {
1747 char *data_offset = read_data + 4 +
1748 le16_to_cpu(pSMBr->DataOffset);
1749 if (memcpy_toiovecend(iov, data_offset,
1750 iov_offset, bytes_read))
93544cc6 1751 rc = -EFAULT;
fb8c4b14 1752 if (buf_type == CIFS_SMALL_BUFFER)
a70307ee 1753 cifs_small_buf_release(read_data);
fb8c4b14 1754 else if (buf_type == CIFS_LARGE_BUFFER)
a70307ee
PS
1755 cifs_buf_release(read_data);
1756 read_data = NULL;
1757 iov_offset += bytes_read;
1da177e4
LT
1758 }
1759 }
a70307ee 1760
1da177e4
LT
1761 if (rc || (bytes_read == 0)) {
1762 if (total_read) {
1763 break;
1764 } else {
1765 FreeXid(xid);
1766 return rc;
1767 }
1768 } else {
a4544347 1769 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1770 *poffset += bytes_read;
1771 }
1772 }
a70307ee 1773
1da177e4
LT
1774 FreeXid(xid);
1775 return total_read;
1776}
1777
0b81c1c4 1778ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
1779 unsigned long nr_segs, loff_t pos)
1780{
1781 ssize_t read;
1782
1783 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1784 if (read > 0)
1785 iocb->ki_pos = pos;
1786
1787 return read;
1788}
1789
1790ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1791 unsigned long nr_segs, loff_t pos)
1792{
1793 struct inode *inode;
1794
1795 inode = iocb->ki_filp->f_path.dentry->d_inode;
1796
1797 if (CIFS_I(inode)->clientCanCacheRead)
1798 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1799
1800 /*
1801 * In strict cache mode we need to read from the server all the time
1802 * if we don't have level II oplock because the server can delay mtime
1803 * change - so we can't make a decision about inode invalidating.
1804 * And we can also fail with pagereading if there are mandatory locks
1805 * on pages affected by this read but not on the region from pos to
1806 * pos+len-1.
1807 */
1808
1809 return cifs_user_readv(iocb, iov, nr_segs, pos);
1810}
1da177e4
LT
1811
1812static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 1813 loff_t *poffset)
1da177e4
LT
1814{
1815 int rc = -EACCES;
1816 unsigned int bytes_read = 0;
1817 unsigned int total_read;
1818 unsigned int current_read_size;
1819 struct cifs_sb_info *cifs_sb;
1820 struct cifsTconInfo *pTcon;
1821 int xid;
1822 char *current_offset;
1823 struct cifsFileInfo *open_file;
ec637e3f 1824 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1825
1826 xid = GetXid();
e6a00296 1827 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1828
1829 if (file->private_data == NULL) {
0f3bc09e 1830 rc = -EBADF;
1da177e4 1831 FreeXid(xid);
0f3bc09e 1832 return rc;
1da177e4 1833 }
c21dfb69 1834 open_file = file->private_data;
13cfb733 1835 pTcon = tlink_tcon(open_file->tlink);
1da177e4
LT
1836
1837 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1838 cFYI(1, "attempting read on write only file instance");
1da177e4 1839
fb8c4b14 1840 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1841 read_size > total_read;
1842 total_read += bytes_read, current_offset += bytes_read) {
1843 current_read_size = min_t(const int, read_size - total_read,
1844 cifs_sb->rsize);
f9f5c817
SF
1845 /* For windows me and 9x we do not want to request more
1846 than it negotiated since it will refuse the read then */
fb8c4b14 1847 if ((pTcon->ses) &&
f9f5c817
SF
1848 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1849 current_read_size = min_t(const int, current_read_size,
1850 pTcon->ses->server->maxBuf - 128);
1851 }
1da177e4
LT
1852 rc = -EAGAIN;
1853 while (rc == -EAGAIN) {
cdff08e7 1854 if (open_file->invalidHandle) {
15886177 1855 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1856 if (rc != 0)
1857 break;
1858 }
bfa0d75a 1859 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1860 open_file->netfid,
1861 current_read_size, *poffset,
1862 &bytes_read, &current_offset,
1863 &buf_type);
1da177e4
LT
1864 }
1865 if (rc || (bytes_read == 0)) {
1866 if (total_read) {
1867 break;
1868 } else {
1869 FreeXid(xid);
1870 return rc;
1871 }
1872 } else {
a4544347 1873 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1874 *poffset += bytes_read;
1875 }
1876 }
1877 FreeXid(xid);
1878 return total_read;
1879}
1880
ca83ce3d
JL
1881/*
1882 * If the page is mmap'ed into a process' page tables, then we need to make
1883 * sure that it doesn't change while being written back.
1884 */
1885static int
1886cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1887{
1888 struct page *page = vmf->page;
1889
1890 lock_page(page);
1891 return VM_FAULT_LOCKED;
1892}
1893
1894static struct vm_operations_struct cifs_file_vm_ops = {
1895 .fault = filemap_fault,
1896 .page_mkwrite = cifs_page_mkwrite,
1897};
1898
7a6a19b1
PS
1899int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1900{
1901 int rc, xid;
1902 struct inode *inode = file->f_path.dentry->d_inode;
1903
1904 xid = GetXid();
1905
1906 if (!CIFS_I(inode)->clientCanCacheRead)
1907 cifs_invalidate_mapping(inode);
1908
1909 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1910 if (rc == 0)
1911 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
1912 FreeXid(xid);
1913 return rc;
1914}
1915
1da177e4
LT
1916int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1917{
1da177e4
LT
1918 int rc, xid;
1919
1920 xid = GetXid();
abab095d 1921 rc = cifs_revalidate_file(file);
1da177e4 1922 if (rc) {
b6b38f70 1923 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1924 FreeXid(xid);
1925 return rc;
1926 }
1927 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1928 if (rc == 0)
1929 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
1930 FreeXid(xid);
1931 return rc;
1932}
1933
1934
fb8c4b14 1935static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1936 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1937{
1938 struct page *page;
1939 char *target;
1940
1941 while (bytes_read > 0) {
1942 if (list_empty(pages))
1943 break;
1944
1945 page = list_entry(pages->prev, struct page, lru);
1946 list_del(&page->lru);
1947
315e995c 1948 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1949 GFP_KERNEL)) {
1950 page_cache_release(page);
b6b38f70 1951 cFYI(1, "Add page cache failed");
3079ca62
SF
1952 data += PAGE_CACHE_SIZE;
1953 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1954 continue;
1955 }
06b43672 1956 page_cache_release(page);
1da177e4 1957
fb8c4b14 1958 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1959
1960 if (PAGE_CACHE_SIZE > bytes_read) {
1961 memcpy(target, data, bytes_read);
1962 /* zero the tail end of this partial page */
fb8c4b14 1963 memset(target + bytes_read, 0,
1da177e4
LT
1964 PAGE_CACHE_SIZE - bytes_read);
1965 bytes_read = 0;
1966 } else {
1967 memcpy(target, data, PAGE_CACHE_SIZE);
1968 bytes_read -= PAGE_CACHE_SIZE;
1969 }
1970 kunmap_atomic(target, KM_USER0);
1971
1972 flush_dcache_page(page);
1973 SetPageUptodate(page);
1974 unlock_page(page);
1da177e4 1975 data += PAGE_CACHE_SIZE;
9dc06558
SJ
1976
1977 /* add page to FS-Cache */
1978 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
1979 }
1980 return;
1981}
1982
/*
 * Read-ahead entry point (->readpages): populate the pages on @page_list
 * with data from the server, in runs of contiguous pages per SMB read.
 *
 * Tries fscache first; on a full fscache hit no network read is issued.
 * Otherwise, for each run of pages with consecutive indices, a single
 * CIFSSMBRead is issued (bounded by the negotiated rsize rounded down to
 * a page multiple) and the response payload is copied into the page cache
 * by cifs_copy_cache_pages().
 *
 * Returns 0 on success or a negative errno from the last failing read.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		/* list is in reverse index order; tail has lowest index */
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		/* retry loop: -EAGAIN means the handle needs reopening */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				/* drop stale response buffer before retry */
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			/* payload begins after the RFC1001 length field at
			   the offset the server reported */
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld . "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		/* free this iteration's response buffer */
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
2131
/*
 * Fill a single locked page with file data starting at *poffset.
 *
 * Tries fscache first; on a miss, issues a synchronous cifs_read() into
 * the kmapped page, zeroes any tail beyond the bytes actually read, and
 * marks the page uptodate before pushing it to fscache.
 *
 * The page reference taken here is dropped on both the success and error
 * paths (io_error falls through after kunmap). Returns 0 on success or a
 * negative errno from the read. The page is NOT unlocked here; callers
 * such as cifs_readpage() unlock it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	/* refresh atime locally; server atime is not consulted here */
	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the tail of a short (EOF-straddling) read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2175
2176static int cifs_readpage(struct file *file, struct page *page)
2177{
2178 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2179 int rc = -EACCES;
2180 int xid;
2181
2182 xid = GetXid();
2183
2184 if (file->private_data == NULL) {
0f3bc09e 2185 rc = -EBADF;
1da177e4 2186 FreeXid(xid);
0f3bc09e 2187 return rc;
1da177e4
LT
2188 }
2189
b6b38f70
JP
2190 cFYI(1, "readpage %p at offset %d 0x%x\n",
2191 page, (int)offset, (int)offset);
1da177e4
LT
2192
2193 rc = cifs_readpage_worker(file, page, &offset);
2194
2195 unlock_page(page);
2196
2197 FreeXid(xid);
2198 return rc;
2199}
2200
a403a0a3
SF
2201static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2202{
2203 struct cifsFileInfo *open_file;
2204
4477288a 2205 spin_lock(&cifs_file_list_lock);
a403a0a3 2206 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2207 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2208 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2209 return 1;
2210 }
2211 }
4477288a 2212 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2213 return 0;
2214}
2215
1da177e4
LT
2216/* We do not want to update the file size from server for inodes
2217 open for write - to avoid races with writepage extending
2218 the file - in the future we could consider allowing
fb8c4b14 2219 refreshing the inode only on increases in the file size
1da177e4
LT
2220 but this is tricky to do without racing with writebehind
2221 page caching in the current Linux kernel design */
4b18f2a9 2222bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2223{
a403a0a3 2224 if (!cifsInode)
4b18f2a9 2225 return true;
50c2f753 2226
a403a0a3
SF
2227 if (is_inode_writable(cifsInode)) {
2228 /* This inode is open for write at least once */
c32a0b68
SF
2229 struct cifs_sb_info *cifs_sb;
2230
c32a0b68 2231 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2233 /* since no page cache to corrupt on directio
c32a0b68 2234 we can change size safely */
4b18f2a9 2235 return true;
c32a0b68
SF
2236 }
2237
fb8c4b14 2238 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2239 return true;
7ba52631 2240
4b18f2a9 2241 return false;
23e7dd7d 2242 } else
4b18f2a9 2243 return true;
1da177e4
LT
2244}
2245
/*
 * ->write_begin: grab (and possibly pre-fill) the page cache page that a
 * buffered write of @len bytes at @pos will copy into.
 *
 * Avoids a server round-trip when possible: a page that is already
 * uptodate, a full-page write, or (with a read oplock) a write at/past
 * EOF needs no read. In the oplocked EOF case the untouched parts of the
 * page are zeroed and PageChecked is set so cifs_write_end knows the rest
 * of the page is effectively up to date. Otherwise, for readable opens,
 * the page is read in synchronously (errors deliberately ignored — a
 * short copy falls back to a sync write in write_end).
 *
 * Returns 0 with *pagep set (page locked), or -ENOMEM and *pagep = NULL.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2317
85f2d6b4
SJ
2318static int cifs_release_page(struct page *page, gfp_t gfp)
2319{
2320 if (PagePrivate(page))
2321 return 0;
2322
2323 return cifs_fscache_release_page(page, gfp);
2324}
2325
2326static void cifs_invalidate_page(struct page *page, unsigned long offset)
2327{
2328 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2329
2330 if (offset == 0)
2331 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2332}
2333
9ad1506b
PS
2334static int cifs_launder_page(struct page *page)
2335{
2336 int rc = 0;
2337 loff_t range_start = page_offset(page);
2338 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2339 struct writeback_control wbc = {
2340 .sync_mode = WB_SYNC_ALL,
2341 .nr_to_write = 0,
2342 .range_start = range_start,
2343 .range_end = range_end,
2344 };
2345
2346 cFYI(1, "Launder page: %p", page);
2347
2348 if (clear_page_dirty_for_io(page))
2349 rc = cifs_writepage_locked(page, &wbc);
2350
2351 cifs_fscache_invalidate_page(page, page->mapping->host);
2352 return rc;
2353}
2354
/*
 * Workqueue handler run when the server breaks our oplock.
 *
 * For regular files: break any local lease (to read-only if we held a
 * read-caching oplock, otherwise fully), flush dirty pages, and — when
 * read caching was lost entirely — wait for writeback and invalidate the
 * locally cached data. Then acknowledge the break to the server (unless
 * it was cancelled by a reconnect) and drop the reference taken by
 * cifs_oplock_break_get().
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching lost: wait out writeback and drop
			   the now-stale cached contents */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing reference for us. Make sure it's done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}
2400
/* must be called while holding cifs_file_list_lock */
/*
 * Pin the superblock and the file handle so the queued oplock-break work
 * can safely run later; released by cifs_oplock_break_put().
 */
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
	cifs_sb_active(cfile->dentry->d_sb);
	cifsFileInfo_get(cfile);
}
2407
/*
 * Drop the references taken by cifs_oplock_break_get(). The superblock
 * pointer is saved first because cifsFileInfo_put() may free cfile (and
 * with it the dentry) before cifs_sb_deactive() runs.
 */
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}
2415
/*
 * Address-space operations for mounts whose server buffers are large
 * enough for multi-page reads; includes ->readpages (see the smallbuf
 * variant below, which omits it).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
This page took 0.520581 seconds and 5 git commands to generate.