CIFS: Separate pushing mandatory locks and lock_sem handling
[deliverable/linux.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
6d5786a3 110 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
608712fe
JL
111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
96daf2b0 118 struct cifs_tcon *tcon;
608712fe
JL
119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
eeb910a6
PS
170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
fb1214e4
PS
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
173 struct cifs_fid *fid, unsigned int xid)
eeb910a6
PS
174{
175 int rc;
fb1214e4 176 int desired_access;
eeb910a6 177 int disposition;
3d3ea8e6 178 int create_options = CREATE_NOT_DIR;
eeb910a6 179 FILE_ALL_INFO *buf;
b8c32dbb 180 struct TCP_Server_Info *server = tcon->ses->server;
eeb910a6 181
b8c32dbb 182 if (!server->ops->open)
fb1214e4
PS
183 return -ENOSYS;
184
185 desired_access = cifs_convert_flags(f_flags);
eeb910a6
PS
186
187/*********************************************************************
188 * open flag mapping table:
189 *
190 * POSIX Flag CIFS Disposition
191 * ---------- ----------------
192 * O_CREAT FILE_OPEN_IF
193 * O_CREAT | O_EXCL FILE_CREATE
194 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
195 * O_TRUNC FILE_OVERWRITE
196 * none of the above FILE_OPEN
197 *
198 * Note that there is not a direct match between disposition
199 * FILE_SUPERSEDE (ie create whether or not file exists although
200 * O_CREAT | O_TRUNC is similar but truncates the existing
201 * file rather than creating a new file as FILE_SUPERSEDE does
202 * (which uses the attributes / metadata passed in on open call)
203 *?
204 *? O_SYNC is a reasonable match to CIFS writethrough flag
205 *? and the read write flags match reasonably. O_LARGEFILE
206 *? is irrelevant because largefile support is always used
207 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
208 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
209 *********************************************************************/
210
211 disposition = cifs_get_disposition(f_flags);
212
213 /* BB pass O_SYNC flag through on file attributes .. BB */
214
215 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
216 if (!buf)
217 return -ENOMEM;
218
3d3ea8e6
SP
219 if (backup_cred(cifs_sb))
220 create_options |= CREATE_OPEN_BACKUP_INTENT;
221
b8c32dbb
PS
222 rc = server->ops->open(xid, tcon, full_path, disposition,
223 desired_access, create_options, fid, oplock, buf,
224 cifs_sb);
eeb910a6
PS
225
226 if (rc)
227 goto out;
228
229 if (tcon->unix_ext)
230 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
231 xid);
232 else
233 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
fb1214e4 234 xid, &fid->netfid);
eeb910a6
PS
235
236out:
237 kfree(buf);
238 return rc;
239}
240
15ecb436 241struct cifsFileInfo *
fb1214e4 242cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
15ecb436
JL
243 struct tcon_link *tlink, __u32 oplock)
244{
245 struct dentry *dentry = file->f_path.dentry;
246 struct inode *inode = dentry->d_inode;
4b4de76e
PS
247 struct cifsInodeInfo *cinode = CIFS_I(inode);
248 struct cifsFileInfo *cfile;
f45d3416 249 struct cifs_fid_locks *fdlocks;
233839b1 250 struct cifs_tcon *tcon = tlink_tcon(tlink);
4b4de76e
PS
251
252 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
253 if (cfile == NULL)
254 return cfile;
255
f45d3416
PS
256 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
257 if (!fdlocks) {
258 kfree(cfile);
259 return NULL;
260 }
261
262 INIT_LIST_HEAD(&fdlocks->locks);
263 fdlocks->cfile = cfile;
264 cfile->llist = fdlocks;
1b4b55a1 265 down_write(&cinode->lock_sem);
f45d3416 266 list_add(&fdlocks->llist, &cinode->llist);
1b4b55a1 267 up_write(&cinode->lock_sem);
f45d3416 268
4b4de76e 269 cfile->count = 1;
4b4de76e
PS
270 cfile->pid = current->tgid;
271 cfile->uid = current_fsuid();
272 cfile->dentry = dget(dentry);
273 cfile->f_flags = file->f_flags;
274 cfile->invalidHandle = false;
275 cfile->tlink = cifs_get_tlink(tlink);
4b4de76e 276 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
f45d3416 277 mutex_init(&cfile->fh_mutex);
15ecb436 278
4477288a 279 spin_lock(&cifs_file_list_lock);
233839b1
PS
280 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
281 oplock = fid->pending_open->oplock;
282 list_del(&fid->pending_open->olist);
283
284 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
285
286 list_add(&cfile->tlist, &tcon->openFileList);
15ecb436
JL
287 /* if readable file instance put first in list*/
288 if (file->f_mode & FMODE_READ)
4b4de76e 289 list_add(&cfile->flist, &cinode->openFileList);
15ecb436 290 else
4b4de76e 291 list_add_tail(&cfile->flist, &cinode->openFileList);
4477288a 292 spin_unlock(&cifs_file_list_lock);
15ecb436 293
4b4de76e
PS
294 file->private_data = cfile;
295 return cfile;
15ecb436
JL
296}
297
764a1b1a
JL
298struct cifsFileInfo *
299cifsFileInfo_get(struct cifsFileInfo *cifs_file)
300{
301 spin_lock(&cifs_file_list_lock);
302 cifsFileInfo_get_locked(cifs_file);
303 spin_unlock(&cifs_file_list_lock);
304 return cifs_file;
305}
306
cdff08e7
SF
307/*
308 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
309 * the filehandle out on the server. Must be called without holding
310 * cifs_file_list_lock.
cdff08e7 311 */
b33879aa
JL
312void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
313{
e66673e3 314 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 315 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
233839b1 316 struct TCP_Server_Info *server = tcon->ses->server;
e66673e3 317 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 318 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7 319 struct cifsLockInfo *li, *tmp;
233839b1
PS
320 struct cifs_fid fid;
321 struct cifs_pending_open open;
cdff08e7
SF
322
323 spin_lock(&cifs_file_list_lock);
5f6dbc9e 324 if (--cifs_file->count > 0) {
cdff08e7
SF
325 spin_unlock(&cifs_file_list_lock);
326 return;
327 }
328
233839b1
PS
329 if (server->ops->get_lease_key)
330 server->ops->get_lease_key(inode, &fid);
331
332 /* store open in pending opens to make sure we don't miss lease break */
333 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
334
cdff08e7
SF
335 /* remove it from the lists */
336 list_del(&cifs_file->flist);
337 list_del(&cifs_file->tlist);
338
339 if (list_empty(&cifsi->openFileList)) {
340 cFYI(1, "closing last open instance for inode %p",
341 cifs_file->dentry->d_inode);
25364138
PS
342 /*
343 * In strict cache mode we need invalidate mapping on the last
344 * close because it may cause a error when we open this file
345 * again and get at least level II oplock.
346 */
4f8ba8a0
PS
347 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
348 CIFS_I(inode)->invalid_mapping = true;
c6723628 349 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
350 }
351 spin_unlock(&cifs_file_list_lock);
352
ad635942
JL
353 cancel_work_sync(&cifs_file->oplock_break);
354
cdff08e7 355 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 356 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 357 unsigned int xid;
0ff78a22 358
6d5786a3 359 xid = get_xid();
0ff78a22 360 if (server->ops->close)
760ad0ca
PS
361 server->ops->close(xid, tcon, &cifs_file->fid);
362 _free_xid(xid);
cdff08e7
SF
363 }
364
233839b1
PS
365 cifs_del_pending_open(&open);
366
f45d3416
PS
367 /*
368 * Delete any outstanding lock records. We'll lose them when the file
cdff08e7
SF
369 * is closed anyway.
370 */
1b4b55a1 371 down_write(&cifsi->lock_sem);
f45d3416 372 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
cdff08e7 373 list_del(&li->llist);
85160e03 374 cifs_del_lock_waiters(li);
cdff08e7 375 kfree(li);
b33879aa 376 }
f45d3416
PS
377 list_del(&cifs_file->llist->llist);
378 kfree(cifs_file->llist);
1b4b55a1 379 up_write(&cifsi->lock_sem);
cdff08e7
SF
380
381 cifs_put_tlink(cifs_file->tlink);
382 dput(cifs_file->dentry);
383 kfree(cifs_file);
b33879aa
JL
384}
385
1da177e4 386int cifs_open(struct inode *inode, struct file *file)
233839b1 387
1da177e4
LT
388{
389 int rc = -EACCES;
6d5786a3 390 unsigned int xid;
590a3fe0 391 __u32 oplock;
1da177e4 392 struct cifs_sb_info *cifs_sb;
b8c32dbb 393 struct TCP_Server_Info *server;
96daf2b0 394 struct cifs_tcon *tcon;
7ffec372 395 struct tcon_link *tlink;
fb1214e4 396 struct cifsFileInfo *cfile = NULL;
1da177e4 397 char *full_path = NULL;
7e12eddb 398 bool posix_open_ok = false;
fb1214e4 399 struct cifs_fid fid;
233839b1 400 struct cifs_pending_open open;
1da177e4 401
6d5786a3 402 xid = get_xid();
1da177e4
LT
403
404 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
405 tlink = cifs_sb_tlink(cifs_sb);
406 if (IS_ERR(tlink)) {
6d5786a3 407 free_xid(xid);
7ffec372
JL
408 return PTR_ERR(tlink);
409 }
410 tcon = tlink_tcon(tlink);
b8c32dbb 411 server = tcon->ses->server;
1da177e4 412
e6a00296 413 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 414 if (full_path == NULL) {
0f3bc09e 415 rc = -ENOMEM;
232341ba 416 goto out;
1da177e4
LT
417 }
418
b6b38f70
JP
419 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
420 inode, file->f_flags, full_path);
276a74a4 421
233839b1 422 if (server->oplocks)
276a74a4
SF
423 oplock = REQ_OPLOCK;
424 else
425 oplock = 0;
426
64cc2c63 427 if (!tcon->broken_posix_open && tcon->unix_ext &&
29e20f9c
PS
428 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
429 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 430 /* can not refresh inode info since size could be stale */
2422f676 431 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 432 cifs_sb->mnt_file_mode /* ignored */,
fb1214e4 433 file->f_flags, &oplock, &fid.netfid, xid);
276a74a4 434 if (rc == 0) {
b6b38f70 435 cFYI(1, "posix open succeeded");
7e12eddb 436 posix_open_ok = true;
64cc2c63
SF
437 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
438 if (tcon->ses->serverNOS)
b6b38f70 439 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
440 " unexpected error on SMB posix open"
441 ", disabling posix open support."
442 " Check if server update available.",
443 tcon->ses->serverName,
b6b38f70 444 tcon->ses->serverNOS);
64cc2c63 445 tcon->broken_posix_open = true;
276a74a4
SF
446 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
447 (rc != -EOPNOTSUPP)) /* path not found or net err */
448 goto out;
fb1214e4
PS
449 /*
450 * Else fallthrough to retry open the old way on network i/o
451 * or DFS errors.
452 */
276a74a4
SF
453 }
454
233839b1
PS
455 if (server->ops->get_lease_key)
456 server->ops->get_lease_key(inode, &fid);
457
458 cifs_add_pending_open(&fid, tlink, &open);
459
7e12eddb 460 if (!posix_open_ok) {
b8c32dbb
PS
461 if (server->ops->get_lease_key)
462 server->ops->get_lease_key(inode, &fid);
463
7e12eddb 464 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
fb1214e4 465 file->f_flags, &oplock, &fid, xid);
233839b1
PS
466 if (rc) {
467 cifs_del_pending_open(&open);
7e12eddb 468 goto out;
233839b1 469 }
7e12eddb 470 }
47c78b7f 471
fb1214e4
PS
472 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
473 if (cfile == NULL) {
b8c32dbb
PS
474 if (server->ops->close)
475 server->ops->close(xid, tcon, &fid);
233839b1 476 cifs_del_pending_open(&open);
1da177e4
LT
477 rc = -ENOMEM;
478 goto out;
479 }
1da177e4 480
9451a9a5
SJ
481 cifs_fscache_set_inode_cookie(inode, file);
482
7e12eddb 483 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
fb1214e4
PS
484 /*
485 * Time to set mode which we can not set earlier due to
486 * problems creating new read-only files.
487 */
7e12eddb
PS
488 struct cifs_unix_set_info_args args = {
489 .mode = inode->i_mode,
490 .uid = NO_CHANGE_64,
491 .gid = NO_CHANGE_64,
492 .ctime = NO_CHANGE_64,
493 .atime = NO_CHANGE_64,
494 .mtime = NO_CHANGE_64,
495 .device = 0,
496 };
fb1214e4
PS
497 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
498 cfile->pid);
1da177e4
LT
499 }
500
501out:
1da177e4 502 kfree(full_path);
6d5786a3 503 free_xid(xid);
7ffec372 504 cifs_put_tlink(tlink);
1da177e4
LT
505 return rc;
506}
507
2ae78ba8
PS
508/*
509 * Try to reacquire byte range locks that were released when session
510 * to server was lost
511 */
1da177e4
LT
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
520
2ae78ba8
PS
521static int
522cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1da177e4
LT
523{
524 int rc = -EACCES;
6d5786a3 525 unsigned int xid;
590a3fe0 526 __u32 oplock;
1da177e4 527 struct cifs_sb_info *cifs_sb;
96daf2b0 528 struct cifs_tcon *tcon;
2ae78ba8
PS
529 struct TCP_Server_Info *server;
530 struct cifsInodeInfo *cinode;
fb8c4b14 531 struct inode *inode;
1da177e4 532 char *full_path = NULL;
2ae78ba8 533 int desired_access;
1da177e4 534 int disposition = FILE_OPEN;
3d3ea8e6 535 int create_options = CREATE_NOT_DIR;
2ae78ba8 536 struct cifs_fid fid;
1da177e4 537
6d5786a3 538 xid = get_xid();
2ae78ba8
PS
539 mutex_lock(&cfile->fh_mutex);
540 if (!cfile->invalidHandle) {
541 mutex_unlock(&cfile->fh_mutex);
0f3bc09e 542 rc = 0;
6d5786a3 543 free_xid(xid);
0f3bc09e 544 return rc;
1da177e4
LT
545 }
546
2ae78ba8 547 inode = cfile->dentry->d_inode;
1da177e4 548 cifs_sb = CIFS_SB(inode->i_sb);
2ae78ba8
PS
549 tcon = tlink_tcon(cfile->tlink);
550 server = tcon->ses->server;
551
552 /*
553 * Can not grab rename sem here because various ops, including those
554 * that already have the rename sem can end up causing writepage to get
555 * called and if the server was down that means we end up here, and we
556 * can never tell if the caller already has the rename_sem.
557 */
558 full_path = build_path_from_dentry(cfile->dentry);
1da177e4 559 if (full_path == NULL) {
3a9f462f 560 rc = -ENOMEM;
2ae78ba8 561 mutex_unlock(&cfile->fh_mutex);
6d5786a3 562 free_xid(xid);
3a9f462f 563 return rc;
1da177e4
LT
564 }
565
2ae78ba8
PS
566 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
567 full_path);
1da177e4 568
10b9b98e 569 if (tcon->ses->server->oplocks)
1da177e4
LT
570 oplock = REQ_OPLOCK;
571 else
4b18f2a9 572 oplock = 0;
1da177e4 573
29e20f9c 574 if (tcon->unix_ext && cap_unix(tcon->ses) &&
7fc8f4e9 575 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
29e20f9c 576 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
577 /*
578 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
579 * original open. Must mask them off for a reopen.
580 */
2ae78ba8 581 unsigned int oflags = cfile->f_flags &
15886177 582 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 583
2422f676 584 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
2ae78ba8
PS
585 cifs_sb->mnt_file_mode /* ignored */,
586 oflags, &oplock, &fid.netfid, xid);
7fc8f4e9 587 if (rc == 0) {
b6b38f70 588 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
589 goto reopen_success;
590 }
2ae78ba8
PS
591 /*
592 * fallthrough to retry open the old way on errors, especially
593 * in the reconnect path it is important to retry hard
594 */
7fc8f4e9
SF
595 }
596
2ae78ba8 597 desired_access = cifs_convert_flags(cfile->f_flags);
7fc8f4e9 598
3d3ea8e6
SP
599 if (backup_cred(cifs_sb))
600 create_options |= CREATE_OPEN_BACKUP_INTENT;
601
b8c32dbb
PS
602 if (server->ops->get_lease_key)
603 server->ops->get_lease_key(inode, &fid);
604
2ae78ba8
PS
605 /*
606 * Can not refresh inode by passing in file_info buf to be returned by
607 * CIFSSMBOpen and then calling get_inode_info with returned buf since
608 * file might have write behind data that needs to be flushed and server
609 * version of file size can be stale. If we knew for sure that inode was
610 * not dirty locally we could do this.
611 */
612 rc = server->ops->open(xid, tcon, full_path, disposition,
613 desired_access, create_options, &fid, &oplock,
614 NULL, cifs_sb);
1da177e4 615 if (rc) {
2ae78ba8
PS
616 mutex_unlock(&cfile->fh_mutex);
617 cFYI(1, "cifs_reopen returned 0x%x", rc);
b6b38f70 618 cFYI(1, "oplock: %d", oplock);
15886177
JL
619 goto reopen_error_exit;
620 }
621
7fc8f4e9 622reopen_success:
2ae78ba8
PS
623 cfile->invalidHandle = false;
624 mutex_unlock(&cfile->fh_mutex);
625 cinode = CIFS_I(inode);
15886177
JL
626
627 if (can_flush) {
628 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 629 mapping_set_error(inode->i_mapping, rc);
15886177 630
15886177 631 if (tcon->unix_ext)
2ae78ba8
PS
632 rc = cifs_get_inode_info_unix(&inode, full_path,
633 inode->i_sb, xid);
15886177 634 else
2ae78ba8
PS
635 rc = cifs_get_inode_info(&inode, full_path, NULL,
636 inode->i_sb, xid, NULL);
637 }
638 /*
639 * Else we are writing out data to server already and could deadlock if
640 * we tried to flush data, and since we do not know if we have data that
641 * would invalidate the current end of file on the server we can not go
642 * to the server to get the new inode info.
643 */
644
645 server->ops->set_fid(cfile, &fid, oplock);
646 cifs_relock_file(cfile);
15886177
JL
647
648reopen_error_exit:
1da177e4 649 kfree(full_path);
6d5786a3 650 free_xid(xid);
1da177e4
LT
651 return rc;
652}
653
654int cifs_close(struct inode *inode, struct file *file)
655{
77970693
JL
656 if (file->private_data != NULL) {
657 cifsFileInfo_put(file->private_data);
658 file->private_data = NULL;
659 }
7ee1af76 660
cdff08e7
SF
661 /* return code from the ->release op is always ignored */
662 return 0;
1da177e4
LT
663}
664
665int cifs_closedir(struct inode *inode, struct file *file)
666{
667 int rc = 0;
6d5786a3 668 unsigned int xid;
4b4de76e 669 struct cifsFileInfo *cfile = file->private_data;
92fc65a7
PS
670 struct cifs_tcon *tcon;
671 struct TCP_Server_Info *server;
672 char *buf;
1da177e4 673
b6b38f70 674 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4 675
92fc65a7
PS
676 if (cfile == NULL)
677 return rc;
678
6d5786a3 679 xid = get_xid();
92fc65a7
PS
680 tcon = tlink_tcon(cfile->tlink);
681 server = tcon->ses->server;
1da177e4 682
92fc65a7
PS
683 cFYI(1, "Freeing private data in close dir");
684 spin_lock(&cifs_file_list_lock);
685 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
686 cfile->invalidHandle = true;
687 spin_unlock(&cifs_file_list_lock);
688 if (server->ops->close_dir)
689 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
690 else
691 rc = -ENOSYS;
692 cFYI(1, "Closing uncompleted readdir with rc %d", rc);
693 /* not much we can do if it fails anyway, ignore rc */
694 rc = 0;
695 } else
696 spin_unlock(&cifs_file_list_lock);
697
698 buf = cfile->srch_inf.ntwrk_buf_start;
699 if (buf) {
700 cFYI(1, "closedir free smb buf in srch struct");
701 cfile->srch_inf.ntwrk_buf_start = NULL;
702 if (cfile->srch_inf.smallBuf)
703 cifs_small_buf_release(buf);
704 else
705 cifs_buf_release(buf);
1da177e4 706 }
92fc65a7
PS
707
708 cifs_put_tlink(cfile->tlink);
709 kfree(file->private_data);
710 file->private_data = NULL;
1da177e4 711 /* BB can we lock the filestruct while this is going on? */
6d5786a3 712 free_xid(xid);
1da177e4
LT
713 return rc;
714}
715
85160e03 716static struct cifsLockInfo *
fbd35aca 717cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 718{
a88b4707 719 struct cifsLockInfo *lock =
fb8c4b14 720 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
721 if (!lock)
722 return lock;
723 lock->offset = offset;
724 lock->length = length;
725 lock->type = type;
a88b4707
PS
726 lock->pid = current->tgid;
727 INIT_LIST_HEAD(&lock->blist);
728 init_waitqueue_head(&lock->block_q);
729 return lock;
85160e03
PS
730}
731
f7ba7fe6 732void
85160e03
PS
733cifs_del_lock_waiters(struct cifsLockInfo *lock)
734{
735 struct cifsLockInfo *li, *tmp;
736 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
737 list_del_init(&li->blist);
738 wake_up(&li->block_q);
739 }
740}
741
742static bool
f45d3416
PS
743cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
744 __u64 length, __u8 type, struct cifsFileInfo *cfile,
579f9053 745 struct cifsLockInfo **conf_lock, bool rw_check)
85160e03 746{
fbd35aca 747 struct cifsLockInfo *li;
f45d3416 748 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
106dc538 749 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03 750
f45d3416 751 list_for_each_entry(li, &fdlocks->locks, llist) {
85160e03
PS
752 if (offset + length <= li->offset ||
753 offset >= li->offset + li->length)
754 continue;
579f9053
PS
755 if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
756 current->tgid == li->pid)
757 continue;
f45d3416
PS
758 if ((type & server->vals->shared_lock_type) &&
759 ((server->ops->compare_fids(cfile, cur_cfile) &&
760 current->tgid == li->pid) || type == li->type))
85160e03 761 continue;
579f9053
PS
762 if (conf_lock)
763 *conf_lock = li;
f45d3416 764 return true;
85160e03
PS
765 }
766 return false;
767}
768
579f9053 769bool
55157dfb 770cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053
PS
771 __u8 type, struct cifsLockInfo **conf_lock,
772 bool rw_check)
161ebf9f 773{
fbd35aca 774 bool rc = false;
f45d3416 775 struct cifs_fid_locks *cur;
55157dfb 776 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
fbd35aca 777
f45d3416
PS
778 list_for_each_entry(cur, &cinode->llist, llist) {
779 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 780 cfile, conf_lock, rw_check);
fbd35aca
PS
781 if (rc)
782 break;
783 }
fbd35aca
PS
784
785 return rc;
161ebf9f
PS
786}
787
9a5101c8
PS
788/*
789 * Check if there is another lock that prevents us to set the lock (mandatory
790 * style). If such a lock exists, update the flock structure with its
791 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
792 * or leave it the same if we can't. Returns 0 if we don't need to request to
793 * the server or 1 otherwise.
794 */
85160e03 795static int
fbd35aca
PS
796cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
797 __u8 type, struct file_lock *flock)
85160e03
PS
798{
799 int rc = 0;
800 struct cifsLockInfo *conf_lock;
fbd35aca 801 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
106dc538 802 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03
PS
803 bool exist;
804
1b4b55a1 805 down_read(&cinode->lock_sem);
85160e03 806
55157dfb 807 exist = cifs_find_lock_conflict(cfile, offset, length, type,
579f9053 808 &conf_lock, false);
85160e03
PS
809 if (exist) {
810 flock->fl_start = conf_lock->offset;
811 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
812 flock->fl_pid = conf_lock->pid;
106dc538 813 if (conf_lock->type & server->vals->shared_lock_type)
85160e03
PS
814 flock->fl_type = F_RDLCK;
815 else
816 flock->fl_type = F_WRLCK;
817 } else if (!cinode->can_cache_brlcks)
818 rc = 1;
819 else
820 flock->fl_type = F_UNLCK;
821
1b4b55a1 822 up_read(&cinode->lock_sem);
85160e03
PS
823 return rc;
824}
825
161ebf9f 826static void
fbd35aca 827cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 828{
fbd35aca 829 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1b4b55a1 830 down_write(&cinode->lock_sem);
f45d3416 831 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 832 up_write(&cinode->lock_sem);
7ee1af76
JA
833}
834
9a5101c8
PS
835/*
836 * Set the byte-range lock (mandatory style). Returns:
837 * 1) 0, if we set the lock and don't need to request to the server;
838 * 2) 1, if no locks prevent us but we need to request to the server;
839 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
840 */
85160e03 841static int
fbd35aca 842cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
161ebf9f 843 bool wait)
85160e03 844{
161ebf9f 845 struct cifsLockInfo *conf_lock;
fbd35aca 846 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
85160e03
PS
847 bool exist;
848 int rc = 0;
849
85160e03
PS
850try_again:
851 exist = false;
1b4b55a1 852 down_write(&cinode->lock_sem);
85160e03 853
55157dfb 854 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
579f9053 855 lock->type, &conf_lock, false);
85160e03 856 if (!exist && cinode->can_cache_brlcks) {
f45d3416 857 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 858 up_write(&cinode->lock_sem);
85160e03
PS
859 return rc;
860 }
861
862 if (!exist)
863 rc = 1;
864 else if (!wait)
865 rc = -EACCES;
866 else {
867 list_add_tail(&lock->blist, &conf_lock->blist);
1b4b55a1 868 up_write(&cinode->lock_sem);
85160e03
PS
869 rc = wait_event_interruptible(lock->block_q,
870 (lock->blist.prev == &lock->blist) &&
871 (lock->blist.next == &lock->blist));
872 if (!rc)
873 goto try_again;
1b4b55a1 874 down_write(&cinode->lock_sem);
a88b4707 875 list_del_init(&lock->blist);
85160e03
PS
876 }
877
1b4b55a1 878 up_write(&cinode->lock_sem);
85160e03
PS
879 return rc;
880}
881
9a5101c8
PS
882/*
883 * Check if there is another lock that prevents us to set the lock (posix
884 * style). If such a lock exists, update the flock structure with its
885 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
886 * or leave it the same if we can't. Returns 0 if we don't need to request to
887 * the server or 1 otherwise.
888 */
85160e03 889static int
4f6bcec9
PS
890cifs_posix_lock_test(struct file *file, struct file_lock *flock)
891{
892 int rc = 0;
893 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
894 unsigned char saved_type = flock->fl_type;
895
50792760
PS
896 if ((flock->fl_flags & FL_POSIX) == 0)
897 return 1;
898
1b4b55a1 899 down_read(&cinode->lock_sem);
4f6bcec9
PS
900 posix_test_lock(file, flock);
901
902 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
903 flock->fl_type = saved_type;
904 rc = 1;
905 }
906
1b4b55a1 907 up_read(&cinode->lock_sem);
4f6bcec9
PS
908 return rc;
909}
910
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;	/* default: caller must send the request to the server */

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	/* write access: posix_lock_file() mutates the inode's lock list */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - the lock must go to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A conflicting lock blocked us; sleep until the blocker goes
		 * away (fl_next cleared), then retry. If interrupted, drop
		 * ourselves from the blocked list before returning.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
943
/*
 * Push all cached byte-range locks of @cfile to the server as mandatory
 * (LOCKING_ANDX) locks. Ranges are batched into a single request buffer,
 * flushed whenever max_num ranges have accumulated. Two passes are made:
 * one per lock type in types[]. Returns 0 on success or the last error
 * returned by cifs_lockv().
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* each pass sends only locks of one type, so a single request buffer
	   never mixes exclusive and shared ranges */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	/* how many LOCKING_ANDX_RANGE entries fit in one SMB */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch and reuse */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* flush any remaining partial batch for this lock type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1014
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of one posix lock taken from the VFS lock list while
 * lock_flocks() is held, so that the actual (sleeping) server calls in
 * cifs_push_posix_locks() can be made after the lock is dropped.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* lock owner (fl_pid) */
	__u16 netfid;		/* file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1028
/*
 * Push all cached posix byte-range locks of @cfile to the server.
 *
 * Works in three phases because lock_flocks() cannot be held across the
 * sleeping CIFSSMBPosixLock() calls (or the GFP_KERNEL allocations):
 * 1) count the FL_POSIX locks under lock_flocks();
 * 2) preallocate that many lock_to_push entries and copy the lock
 *    parameters into them under lock_flocks() again;
 * 3) send each saved lock to the server and free the entries.
 *
 * Returns 0 on success, -ENOMEM if preallocation fails, or the last
 * error reported by the server.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* phase 1: count posix locks currently on the inode */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 2: fill the preallocated entries from the VFS lock list */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* phase 3: send each saved lock to the server (may sleep) */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part way - release what we already allocated */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1116
/*
 * Push all locally cached byte-range locks of @cfile to the server and
 * disable further caching. Dispatches to the posix path when the server
 * supports unix extensions (and posix brlocks aren't mount-disabled),
 * otherwise to the protocol's mandatory-lock push operation.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - locks are already on the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	/* from now on every lock request must go to the server */
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1143
/*
 * Decode a VFS file_lock request into CIFS terms: set *type to the
 * server-specific lock type bits, *lock/*unlock to indicate the requested
 * operation, and *wait_flag when the caller asked for a blocking lock.
 * Unsupported flags (FL_ACCESS, FL_LEASE, unknown bits) are only logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	/* always request large-file (64-bit range) locking */
	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
1da177e4 1191
/*
 * Handle an F_GETLK request: report whether a conflicting lock exists for
 * the range described by @flock.
 *
 * Posix path: test locally, then query the server if needed. Mandatory
 * path: test against the local lock list; if inconclusive, probe the
 * server by trying to take (and immediately release) the lock - first at
 * the requested type, then downgraded to shared - and set fl_type to
 * reflect the strongest conflicting lock found (F_UNLCK if none).
 * Always returns 0 on the mandatory path; the answer is in flock->fl_type.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			/* answered from the local cache */
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		/* conflict found in the local lock list */
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: if we can take the lock on the server there is no conflict */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* even a shared lock failed - an exclusive lock is held */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed; retry as shared to distinguish whether the
	   conflicting lock is shared (F_RDLCK) or exclusive (F_WRLCK) */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1259
f7ba7fe6 1260void
9ee305b7
PS
1261cifs_move_llist(struct list_head *source, struct list_head *dest)
1262{
1263 struct list_head *li, *tmp;
1264 list_for_each_safe(li, tmp, source)
1265 list_move(li, dest);
1266}
1267
f7ba7fe6 1268void
9ee305b7
PS
1269cifs_free_llist(struct list_head *llist)
1270{
1271 struct cifsLockInfo *li, *tmp;
1272 list_for_each_entry_safe(li, tmp, llist, llist) {
1273 cifs_del_lock_waiters(li);
1274 list_del(&li->llist);
1275 kfree(li);
1276 }
1277}
1278
/*
 * Remove all of the current process's locks of @cfile that fall entirely
 * inside the range described by @flock.
 *
 * If brlocks are still cached locally, matching locks are simply deleted
 * from the file's list. Otherwise matching ranges are batched into
 * LOCKING_ANDX unlock requests; each lock is parked on tmp_llist while its
 * batch is in flight so that a failed request can restore the locks to the
 * file's list instead of losing them. Returns 0 or the last server error.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* write access: we delete/move entries of the file's lock list */
	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only unlock locks fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* flush the final partial batch for this lock type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1385
/*
 * Apply a lock (@lock != 0) or unlock (@unlock != 0) request to the file.
 *
 * Posix path: try to satisfy the request locally via cifs_posix_lock_set();
 * if that says the server must be involved, send a posix lock/unlock SMB.
 * Mandatory path: record the lock locally first (cifs_lock_add_if decides
 * whether a server round trip is needed), send the server request, and only
 * then commit the lock to the file's list. On any path, a posix-style
 * request is finally mirrored into the VFS with posix_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			/* satisfied locally (0) or failed (<0) */
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NOTE: this local deliberately shadows the int @lock flag */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			/* 0: handled locally; <0: error - either way, done */
			goto out;

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused - don't keep a stale local record */
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1447
/*
 * VFS ->lock entry point for CIFS. Decodes the request via
 * cifs_read_flock(), decides whether posix (unix extensions) or mandatory
 * semantics apply, then dispatches F_GETLK to cifs_getlk() and set/unset
 * requests to cifs_setlk(). Requests that are neither lock nor unlock are
 * rejected with -EOPNOTSUPP.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	/* posix brlocks only when the server advertises FCNTL capability and
	   the mount hasn't disabled them */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1506
597b027f
JL
1507/*
1508 * update the file size (if needed) after a write. Should be called with
1509 * the inode->i_lock held
1510 */
72432ffc 1511void
fbec9ab9
JL
1512cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1513 unsigned int bytes_written)
1514{
1515 loff_t end_of_write = offset + bytes_written;
1516
1517 if (end_of_write > cifsi->server_eof)
1518 cifsi->server_eof = end_of_write;
1519}
1520
/*
 * Write @write_size bytes from @write_data to the file at *@offset on
 * behalf of @pid, using the protocol's sync_write operation.
 *
 * Writes in chunks of at most cifs_sb->wsize, retrying each chunk on
 * -EAGAIN (reopening an invalidated handle first). *@offset is advanced
 * past every successfully written chunk, and both the cached server EOF
 * and i_size are updated under inode->i_lock. Returns the number of bytes
 * written, or a negative error if nothing was written at all.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				/* partial success - report what was written */
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects the EOF update against readers */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1605
/*
 * Find an open, valid, readable handle for @cifs_inode, take a reference
 * on it and return it; returns NULL if none exists. When @fsuid_only is
 * set on a multiuser mount, only handles owned by the current fsuid are
 * considered. The returned handle must be released with cifsFileInfo_put().
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1638}
630f3f0c 1639
/*
 * Find an open, writable handle for @cifs_inode, take a reference on it
 * and return it; returns NULL if none can be found or revived. When
 * @fsuid_only is set on a multiuser mount, only handles owned by the
 * current fsuid are considered.
 *
 * Search order: first handles belonging to the current thread group, then
 * (any_available) handles of any owner. If only an invalidated handle is
 * found, try to reopen it, up to MAX_REOPEN_ATT times; on reopen failure
 * the handle is moved to the list tail so the retry prefers others.
 * The returned handle must be released with cifsFileInfo_put().
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember the first invalid handle as a
				   fallback candidate for reopening */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* reopen outside the spinlock - it may sleep */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1718}
1719
/*
 * Write the byte range [@from, @to) of @page back to the server using any
 * writable handle for the inode. The range is clamped so the write never
 * extends the file. Returns 0 on success, a negative error on failure,
 * and 0 (ignoring the race) when the page lies beyond a truncated i_size.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* page stays mapped until the single kunmap() below */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1773
1da177e4 1774static int cifs_writepages(struct address_space *mapping,
37c0eb46 1775 struct writeback_control *wbc)
1da177e4 1776{
c3d17b63
JL
1777 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1778 bool done = false, scanned = false, range_whole = false;
1779 pgoff_t end, index;
1780 struct cifs_writedata *wdata;
c9de5c80 1781 struct TCP_Server_Info *server;
37c0eb46 1782 struct page *page;
37c0eb46 1783 int rc = 0;
50c2f753 1784
37c0eb46 1785 /*
c3d17b63 1786 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1787 * one page at a time via cifs_writepage
1788 */
1789 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1790 return generic_writepages(mapping, wbc);
1791
111ebb6e 1792 if (wbc->range_cyclic) {
37c0eb46 1793 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1794 end = -1;
1795 } else {
1796 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1797 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1798 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1799 range_whole = true;
1800 scanned = true;
37c0eb46
SF
1801 }
1802retry:
c3d17b63
JL
1803 while (!done && index <= end) {
1804 unsigned int i, nr_pages, found_pages;
1805 pgoff_t next = 0, tofind;
1806 struct page **pages;
1807
1808 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1809 end - index) + 1;
1810
c2e87640
JL
1811 wdata = cifs_writedata_alloc((unsigned int)tofind,
1812 cifs_writev_complete);
c3d17b63
JL
1813 if (!wdata) {
1814 rc = -ENOMEM;
1815 break;
1816 }
1817
1818 /*
1819 * find_get_pages_tag seems to return a max of 256 on each
1820 * iteration, so we must call it several times in order to
1821 * fill the array or the wsize is effectively limited to
1822 * 256 * PAGE_CACHE_SIZE.
1823 */
1824 found_pages = 0;
1825 pages = wdata->pages;
1826 do {
1827 nr_pages = find_get_pages_tag(mapping, &index,
1828 PAGECACHE_TAG_DIRTY,
1829 tofind, pages);
1830 found_pages += nr_pages;
1831 tofind -= nr_pages;
1832 pages += nr_pages;
1833 } while (nr_pages && tofind && index <= end);
1834
1835 if (found_pages == 0) {
1836 kref_put(&wdata->refcount, cifs_writedata_release);
1837 break;
1838 }
1839
1840 nr_pages = 0;
1841 for (i = 0; i < found_pages; i++) {
1842 page = wdata->pages[i];
37c0eb46
SF
1843 /*
1844 * At this point we hold neither mapping->tree_lock nor
1845 * lock on the page itself: the page may be truncated or
1846 * invalidated (changing page->mapping to NULL), or even
1847 * swizzled back from swapper_space to tmpfs file
1848 * mapping
1849 */
1850
c3d17b63 1851 if (nr_pages == 0)
37c0eb46 1852 lock_page(page);
529ae9aa 1853 else if (!trylock_page(page))
37c0eb46
SF
1854 break;
1855
1856 if (unlikely(page->mapping != mapping)) {
1857 unlock_page(page);
1858 break;
1859 }
1860
111ebb6e 1861 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1862 done = true;
37c0eb46
SF
1863 unlock_page(page);
1864 break;
1865 }
1866
1867 if (next && (page->index != next)) {
1868 /* Not next consecutive page */
1869 unlock_page(page);
1870 break;
1871 }
1872
1873 if (wbc->sync_mode != WB_SYNC_NONE)
1874 wait_on_page_writeback(page);
1875
1876 if (PageWriteback(page) ||
cb876f45 1877 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1878 unlock_page(page);
1879 break;
1880 }
84d2f07e 1881
cb876f45
LT
1882 /*
1883 * This actually clears the dirty bit in the radix tree.
1884 * See cifs_writepage() for more commentary.
1885 */
1886 set_page_writeback(page);
1887
3a98b861 1888 if (page_offset(page) >= i_size_read(mapping->host)) {
c3d17b63 1889 done = true;
84d2f07e 1890 unlock_page(page);
cb876f45 1891 end_page_writeback(page);
84d2f07e
SF
1892 break;
1893 }
1894
c3d17b63
JL
1895 wdata->pages[i] = page;
1896 next = page->index + 1;
1897 ++nr_pages;
1898 }
37c0eb46 1899
c3d17b63
JL
1900 /* reset index to refind any pages skipped */
1901 if (nr_pages == 0)
1902 index = wdata->pages[0]->index + 1;
84d2f07e 1903
c3d17b63
JL
1904 /* put any pages we aren't going to use */
1905 for (i = nr_pages; i < found_pages; i++) {
1906 page_cache_release(wdata->pages[i]);
1907 wdata->pages[i] = NULL;
1908 }
37c0eb46 1909
c3d17b63
JL
1910 /* nothing to write? */
1911 if (nr_pages == 0) {
1912 kref_put(&wdata->refcount, cifs_writedata_release);
1913 continue;
37c0eb46 1914 }
fbec9ab9 1915
c3d17b63
JL
1916 wdata->sync_mode = wbc->sync_mode;
1917 wdata->nr_pages = nr_pages;
1918 wdata->offset = page_offset(wdata->pages[0]);
eddb079d
JL
1919 wdata->pagesz = PAGE_CACHE_SIZE;
1920 wdata->tailsz =
3a98b861
JL
1921 min(i_size_read(mapping->host) -
1922 page_offset(wdata->pages[nr_pages - 1]),
eddb079d
JL
1923 (loff_t)PAGE_CACHE_SIZE);
1924 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1925 wdata->tailsz;
941b853d 1926
c3d17b63
JL
1927 do {
1928 if (wdata->cfile != NULL)
1929 cifsFileInfo_put(wdata->cfile);
1930 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1931 false);
1932 if (!wdata->cfile) {
1933 cERROR(1, "No writable handles for inode");
1934 rc = -EBADF;
1935 break;
941b853d 1936 }
fe5f5d2e 1937 wdata->pid = wdata->cfile->pid;
c9de5c80
PS
1938 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1939 rc = server->ops->async_writev(wdata);
c3d17b63 1940 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1941
c3d17b63
JL
1942 for (i = 0; i < nr_pages; ++i)
1943 unlock_page(wdata->pages[i]);
f3983c21 1944
c3d17b63
JL
1945 /* send failure -- clean up the mess */
1946 if (rc != 0) {
1947 for (i = 0; i < nr_pages; ++i) {
941b853d 1948 if (rc == -EAGAIN)
c3d17b63
JL
1949 redirty_page_for_writepage(wbc,
1950 wdata->pages[i]);
1951 else
1952 SetPageError(wdata->pages[i]);
1953 end_page_writeback(wdata->pages[i]);
1954 page_cache_release(wdata->pages[i]);
37c0eb46 1955 }
941b853d
JL
1956 if (rc != -EAGAIN)
1957 mapping_set_error(mapping, rc);
c3d17b63
JL
1958 }
1959 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1960
c3d17b63
JL
1961 wbc->nr_to_write -= nr_pages;
1962 if (wbc->nr_to_write <= 0)
1963 done = true;
b066a48c 1964
c3d17b63 1965 index = next;
37c0eb46 1966 }
c3d17b63 1967
37c0eb46
SF
1968 if (!scanned && !done) {
1969 /*
1970 * We hit the last page and there is more work to be done: wrap
1971 * back to the start of the file
1972 */
c3d17b63 1973 scanned = true;
37c0eb46
SF
1974 index = 0;
1975 goto retry;
1976 }
c3d17b63 1977
111ebb6e 1978 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1979 mapping->writeback_index = index;
1980
1da177e4
LT
1981 return rc;
1982}
1da177e4 1983
9ad1506b
PS
/*
 * Write out a single, already-locked page with a synchronous SMB write
 * (cifs_partialpagewrite). The caller holds the page lock and is
 * responsible for dropping it (see cifs_writepage()).
 * Returns 0 on success or a negative errno.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	/* extra reference so the page survives until end_page_writeback */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	/* for data-integrity writeback we must keep retrying on -EAGAIN */
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
2022
9ad1506b
PS
/* ->writepage: write the locked page, then release the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
2029
d9414774
NP
/*
 * Complete a buffered write started by ->write_begin(). If the page is
 * up to date it is simply marked dirty for later writeback; otherwise
 * only the copied bytes are pushed to the server synchronously via
 * cifs_write(). Extends i_size (under i_lock) when the write grew the
 * file. Returns the number of bytes accepted or a negative errno.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* honour forwarded pids for mounts with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	/*
	 * PageChecked marks a page that was not read in by write_begin
	 * (presumably set there — verify against ->write_begin); a
	 * full-length copy makes it fully valid.
	 */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2090
02c24a82
JB
/*
 * fsync for strict cache mode: flush dirty pages, invalidate the local
 * page cache when we hold no read oplock (another client may have
 * changed the file), then ask the server to flush its own cached data
 * for this handle unless the "nostrictsync" mount flag is set.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	/* no read oplock: drop possibly-stale cached pages */
	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2133
/*
 * Plain fsync: flush dirty pages for the range, then ask the server to
 * flush its own cached data for this handle unless "nostrictsync" was
 * mounted. Unlike cifs_strict_fsync(), the local page cache is kept.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2167
1da177e4
LT
2168/*
2169 * As file closes, flush all cached write data for this inode checking
2170 * for write behind errors.
2171 */
75e1fcc0 2172int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2173{
fb8c4b14 2174 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
2175 int rc = 0;
2176
eb4b756b 2177 if (file->f_mode & FMODE_WRITE)
d3f1322a 2178 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2179
b6b38f70 2180 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
2181
2182 return rc;
2183}
2184
72432ffc
PS
2185static int
2186cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2187{
2188 int rc = 0;
2189 unsigned long i;
2190
2191 for (i = 0; i < num_pages; i++) {
e94f7ba1 2192 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2193 if (!pages[i]) {
2194 /*
2195 * save number of pages we have already allocated and
2196 * return with ENOMEM error
2197 */
2198 num_pages = i;
2199 rc = -ENOMEM;
e94f7ba1 2200 break;
72432ffc
PS
2201 }
2202 }
2203
e94f7ba1
JL
2204 if (rc) {
2205 for (i = 0; i < num_pages; i++)
2206 put_page(pages[i]);
2207 }
72432ffc
PS
2208 return rc;
2209}
2210
2211static inline
2212size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2213{
2214 size_t num_pages;
2215 size_t clen;
2216
2217 clen = min_t(const size_t, len, wsize);
a7103b99 2218 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2219
2220 if (cur_len)
2221 *cur_len = clen;
2222
2223 return num_pages;
2224}
2225
da82f7e7
JL
/*
 * Workqueue completion for an uncached async write: record the new
 * server EOF and grow i_size under i_lock, wake the issuing thread
 * waiting on wdata->done, release the page references (kept only when
 * the write will be resent on -EAGAIN), and drop the work's reference
 * on wdata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* on -EAGAIN the pages are still needed for the resend */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2250
/*
 * Attempt to send a write to the server, retrying on any -EAGAIN error.
 * If the file handle was invalidated (e.g. by a reconnect) it is
 * reopened first; a failed reopen only loops again when the reopen
 * itself returned -EAGAIN, otherwise its error is returned.
 */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}
2271
72432ffc
PS
/*
 * Uncached vectored write: copy the user iovec into freshly allocated
 * pages, send the data in wsize-sized async requests, then wait for the
 * replies in order of increasing offset, resending any request that
 * failed with -EAGAIN. Advances *poffset by the bytes written on
 * success. Returns bytes written, or a negative errno if nothing was
 * written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* fill the pages from the user's iovec */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		/* cur_len now holds the bytes actually copied */
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/*
		 * NOTE(review): iov_iter_copy_from_user may copy less than
		 * requested on a fault; nr_pages/tailsz still assume the
		 * full page count — verify the short-copy case.
		 */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2401
0b81c1c4 2402ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2403 unsigned long nr_segs, loff_t pos)
2404{
2405 ssize_t written;
2406 struct inode *inode;
2407
2408 inode = iocb->ki_filp->f_path.dentry->d_inode;
2409
2410 /*
2411 * BB - optimize the way when signing is disabled. We can drop this
2412 * extra memory-to-memory copying and use iovec buffers for constructing
2413 * write request.
2414 */
2415
2416 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2417 if (written > 0) {
2418 CIFS_I(inode)->invalid_mapping = true;
2419 iocb->ki_pos = pos;
2420 }
2421
2422 return written;
2423}
2424
579f9053
PS
/*
 * Write through the page cache on a mandatory-locking mount. lock_sem is
 * held for read across the whole operation (including generic_write_sync)
 * so that no conflicting brlock can be added to the lock list while the
 * write is in flight; the write only proceeds when no existing lock
 * conflicts with the target range.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     true)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}
2466
/*
 * Write entry point in strict cache mode: pick between writing through
 * the page cache (exclusive oplock held, or POSIX byte-range locks in
 * use) and sending the data directly to the server, since flushing
 * whole pages could hit mandatory locks held on bytes outside the
 * written range.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * If we have an oplock for read and want to write a data to the file
	 * we need to store it in the page cache and then push it to the server
	 * to be sure the next read will get a valid data.
	 */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
		ssize_t written;
		int rc;

		written = generic_file_aio_write(iocb, iov, nr_segs, pos);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			return (ssize_t)rc;

		return written;
	}
#endif

	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */

	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

	/* POSIX locks cannot conflict page-wise; the cached path is safe */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	return cifs_writev(iocb, iov, nr_segs, pos);
}
2514
0471ca3f 2515static struct cifs_readdata *
f4e49cd2 2516cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2517{
2518 struct cifs_readdata *rdata;
f4e49cd2 2519
c5fab6f4
JL
2520 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2521 GFP_KERNEL);
0471ca3f 2522 if (rdata != NULL) {
6993f74a 2523 kref_init(&rdata->refcount);
1c892549
JL
2524 INIT_LIST_HEAD(&rdata->list);
2525 init_completion(&rdata->done);
0471ca3f 2526 INIT_WORK(&rdata->work, complete);
0471ca3f 2527 }
f4e49cd2 2528
0471ca3f
JL
2529 return rdata;
2530}
2531
6993f74a
JL
2532void
2533cifs_readdata_release(struct kref *refcount)
0471ca3f 2534{
6993f74a
JL
2535 struct cifs_readdata *rdata = container_of(refcount,
2536 struct cifs_readdata, refcount);
2537
2538 if (rdata->cfile)
2539 cifsFileInfo_put(rdata->cfile);
2540
0471ca3f
JL
2541 kfree(rdata);
2542}
2543
1c892549 2544static int
c5fab6f4 2545cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2546{
2547 int rc = 0;
c5fab6f4 2548 struct page *page;
1c892549
JL
2549 unsigned int i;
2550
c5fab6f4 2551 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2552 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2553 if (!page) {
2554 rc = -ENOMEM;
2555 break;
2556 }
c5fab6f4 2557 rdata->pages[i] = page;
1c892549
JL
2558 }
2559
2560 if (rc) {
c5fab6f4
JL
2561 for (i = 0; i < nr_pages; i++) {
2562 put_page(rdata->pages[i]);
2563 rdata->pages[i] = NULL;
1c892549
JL
2564 }
2565 }
2566 return rc;
2567}
2568
2569static void
2570cifs_uncached_readdata_release(struct kref *refcount)
2571{
1c892549
JL
2572 struct cifs_readdata *rdata = container_of(refcount,
2573 struct cifs_readdata, refcount);
c5fab6f4 2574 unsigned int i;
1c892549 2575
c5fab6f4
JL
2576 for (i = 0; i < rdata->nr_pages; i++) {
2577 put_page(rdata->pages[i]);
2578 rdata->pages[i] = NULL;
1c892549
JL
2579 }
2580 cifs_readdata_release(refcount);
2581}
2582
2a1bb138
JL
/*
 * Attempt to send an async read to the server, retrying on any -EAGAIN
 * result. If the file handle was invalidated (e.g. by a reconnect) it
 * is reopened first; a failed reopen only loops again when the reopen
 * itself returned -EAGAIN, otherwise its error is returned.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}
2602
1c892549
JL
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 on success or the error from memcpy_toiovecend(); *copied
 * reflects the bytes transferred before any failure.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* where this response's data lands within the overall request */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
2657
2658static void
2659cifs_uncached_readv_complete(struct work_struct *work)
2660{
2661 struct cifs_readdata *rdata = container_of(work,
2662 struct cifs_readdata, work);
1c892549
JL
2663
2664 complete(&rdata->done);
2665 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2666}
2667
/*
 * Receive "len" bytes of read-response data from the server socket into
 * rdata's pages, one page per socket read. A trailing partial page is
 * zero-filled past "len" and recorded in rdata->tailsz; pages beyond the
 * received length are released and dropped from rdata. Returns the
 * total bytes read, or the last socket error if nothing was read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2715
a70307ee
PS
2716static ssize_t
2717cifs_iovec_read(struct file *file, const struct iovec *iov,
2718 unsigned long nr_segs, loff_t *poffset)
1da177e4 2719{
1c892549 2720 ssize_t rc;
a70307ee 2721 size_t len, cur_len;
1c892549
JL
2722 ssize_t total_read = 0;
2723 loff_t offset = *poffset;
2724 unsigned int npages;
1da177e4 2725 struct cifs_sb_info *cifs_sb;
1c892549 2726 struct cifs_tcon *tcon;
1da177e4 2727 struct cifsFileInfo *open_file;
1c892549
JL
2728 struct cifs_readdata *rdata, *tmp;
2729 struct list_head rdata_list;
2730 pid_t pid;
a70307ee
PS
2731
2732 if (!nr_segs)
2733 return 0;
2734
2735 len = iov_length(iov, nr_segs);
2736 if (!len)
2737 return 0;
1da177e4 2738
1c892549 2739 INIT_LIST_HEAD(&rdata_list);
e6a00296 2740 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
c21dfb69 2741 open_file = file->private_data;
1c892549 2742 tcon = tlink_tcon(open_file->tlink);
1da177e4 2743
fc9c5966
PS
2744 if (!tcon->ses->server->ops->async_readv)
2745 return -ENOSYS;
2746
d4ffff1f
PS
2747 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2748 pid = open_file->pid;
2749 else
2750 pid = current->tgid;
2751
ad7a2926 2752 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2753 cFYI(1, "attempting read on write only file instance");
ad7a2926 2754
1c892549
JL
2755 do {
2756 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2757 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
a70307ee 2758
1c892549
JL
2759 /* allocate a readdata struct */
2760 rdata = cifs_readdata_alloc(npages,
2761 cifs_uncached_readv_complete);
2762 if (!rdata) {
2763 rc = -ENOMEM;
2764 goto error;
1da177e4 2765 }
a70307ee 2766
c5fab6f4 2767 rc = cifs_read_allocate_pages(rdata, npages);
1c892549
JL
2768 if (rc)
2769 goto error;
2770
2771 rdata->cfile = cifsFileInfo_get(open_file);
c5fab6f4 2772 rdata->nr_pages = npages;
1c892549
JL
2773 rdata->offset = offset;
2774 rdata->bytes = cur_len;
2775 rdata->pid = pid;
8321fec4
JL
2776 rdata->pagesz = PAGE_SIZE;
2777 rdata->read_into_pages = cifs_uncached_read_into_pages;
1c892549
JL
2778
2779 rc = cifs_retry_async_readv(rdata);
2780error:
2781 if (rc) {
2782 kref_put(&rdata->refcount,
2783 cifs_uncached_readdata_release);
2784 break;
2785 }
2786
2787 list_add_tail(&rdata->list, &rdata_list);
2788 offset += cur_len;
2789 len -= cur_len;
2790 } while (len > 0);
2791
2792 /* if at least one read request send succeeded, then reset rc */
2793 if (!list_empty(&rdata_list))
2794 rc = 0;
2795
2796 /* the loop below should proceed in the order of increasing offsets */
2797restart_loop:
2798 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2799 if (!rc) {
2800 ssize_t copied;
2801
2802 /* FIXME: freezable sleep too? */
2803 rc = wait_for_completion_killable(&rdata->done);
2804 if (rc)
2805 rc = -EINTR;
2806 else if (rdata->result)
2807 rc = rdata->result;
2808 else {
2809 rc = cifs_readdata_to_iov(rdata, iov,
2810 nr_segs, *poffset,
2811 &copied);
2812 total_read += copied;
2813 }
2814
2815 /* resend call if it's a retryable error */
2816 if (rc == -EAGAIN) {
2817 rc = cifs_retry_async_readv(rdata);
2818 goto restart_loop;
1da177e4 2819 }
1da177e4 2820 }
1c892549
JL
2821 list_del_init(&rdata->list);
2822 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
1da177e4 2823 }
a70307ee 2824
1c892549
JL
2825 cifs_stats_bytes_read(tcon, total_read);
2826 *poffset += total_read;
2827
09a4707e
PS
2828 /* mask nodata case */
2829 if (rc == -ENODATA)
2830 rc = 0;
2831
1c892549 2832 return total_read ? total_read : rc;
1da177e4
LT
2833}
2834
0b81c1c4 2835ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2836 unsigned long nr_segs, loff_t pos)
2837{
2838 ssize_t read;
2839
2840 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2841 if (read > 0)
2842 iocb->ki_pos = pos;
2843
2844 return read;
2845}
2846
579f9053
PS
/*
 * Read entry point in strict cache mode: read through the page cache
 * only when we hold a read oplock (or POSIX locking applies);
 * otherwise go to the server directly. For the cached path, lock_sem
 * is held so no conflicting mandatory brlock can appear while checking.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	/* POSIX locks cannot conflict page-wise; the cached path is safe */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, true))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}
1da177e4 2887
f9c6e234
PS
/*
 * Synchronous read: pull up to read_size bytes from the server into
 * read_data in rsize-sized chunks, reopening the handle and retrying on
 * -EAGAIN. Advances *offset by the bytes read. Returns the total bytes
 * read, or a negative errno if nothing was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			/* report a short read; fail only if nothing was read */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): total_read has not yet been updated
			 * for this iteration, so this records the running
			 * total rather than bytes_read — verify the intended
			 * stats accounting.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
2976
ca83ce3d
JL
2977/*
2978 * If the page is mmap'ed into a process' page tables, then we need to make
2979 * sure that it doesn't change while being written back.
2980 */
2981static int
2982cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2983{
2984 struct page *page = vmf->page;
2985
2986 lock_page(page);
2987 return VM_FAULT_LOCKED;
2988}
2989
/*
 * VM operations for mmap'ed CIFS files: faults are served from the page
 * cache via filemap_fault, and pages are locked before a shared mapping
 * writes to them (see cifs_page_mkwrite above).
 */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};
2995
7a6a19b1
PS
2996int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2997{
2998 int rc, xid;
2999 struct inode *inode = file->f_path.dentry->d_inode;
3000
6d5786a3 3001 xid = get_xid();
7a6a19b1 3002
6feb9891
PS
3003 if (!CIFS_I(inode)->clientCanCacheRead) {
3004 rc = cifs_invalidate_mapping(inode);
3005 if (rc)
3006 return rc;
3007 }
7a6a19b1
PS
3008
3009 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3010 if (rc == 0)
3011 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3012 free_xid(xid);
7a6a19b1
PS
3013 return rc;
3014}
3015
1da177e4
LT
3016int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3017{
1da177e4
LT
3018 int rc, xid;
3019
6d5786a3 3020 xid = get_xid();
abab095d 3021 rc = cifs_revalidate_file(file);
1da177e4 3022 if (rc) {
b6b38f70 3023 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
6d5786a3 3024 free_xid(xid);
1da177e4
LT
3025 return rc;
3026 }
3027 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3028 if (rc == 0)
3029 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3030 free_xid(xid);
1da177e4
LT
3031 return rc;
3032}
3033
0471ca3f
JL
/*
 * Work-queue completion for an async readv request: for each page in the
 * rdata, put it back on the LRU, mark it uptodate if the read succeeded,
 * unlock it, hand it to fscache, and drop the page-cache reference.
 * Finally drop this work item's reference on the rdata.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			/* data landed in the page; publish it */
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		/* cache write must happen after the page is unlocked */
		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3061
/*
 * Receive @len bytes of read response data from the socket directly into
 * the pages of @rdata, one page per iteration.  Pages beyond the data are
 * either zero-filled (when past the server's EOF estimate, to stop the VFS
 * re-requesting them) or released untouched.  Returns the total number of
 * bytes read, or a negative error if nothing was received.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			/* remember the short final receive size */
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* pull this page's worth of data off the socket */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
3137
1da177e4
LT
/*
 * Readahead entry point: batch contiguous pages from @page_list (up to the
 * rsize limit) into async readv requests.  Pages are first offered to
 * fscache; any remaining ones are locked, inserted into the page cache,
 * and handed to cifs_retry_async_readv with cifs_readv_complete as the
 * completion handler.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* with RWPIDFORWARD, reuse the pid that opened the file on the wire */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* splice the batched pages into the rdata, lowest index first */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* send failed: unwind page state for this batch */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* completion handler owns the remaining reference */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3284
/*
 * Fill a single page at *@poffset: first try fscache, then fall back to a
 * synchronous cifs_read, zero-padding any tail beyond the bytes received.
 * Returns 0 on success or a negative error; on the fallback path the page
 * reference and kmap are released before returning.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* short read: zero the rest of the page so no stale data leaks */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3328
3329static int cifs_readpage(struct file *file, struct page *page)
3330{
3331 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3332 int rc = -EACCES;
6d5786a3 3333 unsigned int xid;
1da177e4 3334
6d5786a3 3335 xid = get_xid();
1da177e4
LT
3336
3337 if (file->private_data == NULL) {
0f3bc09e 3338 rc = -EBADF;
6d5786a3 3339 free_xid(xid);
0f3bc09e 3340 return rc;
1da177e4
LT
3341 }
3342
ac3aa2f8 3343 cFYI(1, "readpage %p at offset %d 0x%x",
b6b38f70 3344 page, (int)offset, (int)offset);
1da177e4
LT
3345
3346 rc = cifs_readpage_worker(file, page, &offset);
3347
3348 unlock_page(page);
3349
6d5786a3 3350 free_xid(xid);
1da177e4
LT
3351 return rc;
3352}
3353
a403a0a3
SF
3354static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3355{
3356 struct cifsFileInfo *open_file;
3357
4477288a 3358 spin_lock(&cifs_file_list_lock);
a403a0a3 3359 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3360 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 3361 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3362 return 1;
3363 }
3364 }
4477288a 3365 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3366 return 0;
3367}
3368
1da177e4
LT
3369/* We do not want to update the file size from server for inodes
3370 open for write - to avoid races with writepage extending
3371 the file - in the future we could consider allowing
fb8c4b14 3372 refreshing the inode only on increases in the file size
1da177e4
LT
3373 but this is tricky to do without racing with writebehind
3374 page caching in the current Linux kernel design */
4b18f2a9 3375bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3376{
a403a0a3 3377 if (!cifsInode)
4b18f2a9 3378 return true;
50c2f753 3379
a403a0a3
SF
3380 if (is_inode_writable(cifsInode)) {
3381 /* This inode is open for write at least once */
c32a0b68
SF
3382 struct cifs_sb_info *cifs_sb;
3383
c32a0b68 3384 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3386 /* since no page cache to corrupt on directio
c32a0b68 3387 we can change size safely */
4b18f2a9 3388 return true;
c32a0b68
SF
3389 }
3390
fb8c4b14 3391 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3392 return true;
7ba52631 3393
4b18f2a9 3394 return false;
23e7dd7d 3395 } else
4b18f2a9 3396 return true;
1da177e4
LT
3397}
3398
d9414774
NP
/*
 * ->write_begin handler: pin the target page and decide whether its
 * existing contents must be read from the server first.  Full-page writes,
 * already-uptodate pages, and (with a read oplock) writes wholly beyond or
 * covering EOF skip the read.  Returns the locked page via *@pagep.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3470
85f2d6b4
SJ
3471static int cifs_release_page(struct page *page, gfp_t gfp)
3472{
3473 if (PagePrivate(page))
3474 return 0;
3475
3476 return cifs_fscache_release_page(page, gfp);
3477}
3478
3479static void cifs_invalidate_page(struct page *page, unsigned long offset)
3480{
3481 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3482
3483 if (offset == 0)
3484 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3485}
3486
9ad1506b
PS
3487static int cifs_launder_page(struct page *page)
3488{
3489 int rc = 0;
3490 loff_t range_start = page_offset(page);
3491 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3492 struct writeback_control wbc = {
3493 .sync_mode = WB_SYNC_ALL,
3494 .nr_to_write = 0,
3495 .range_start = range_start,
3496 .range_end = range_end,
3497 };
3498
3499 cFYI(1, "Launder page: %p", page);
3500
3501 if (clear_page_dirty_for_io(page))
3502 rc = cifs_writepage_locked(page, &wbc);
3503
3504 cifs_fscache_invalidate_page(page, page->mapping->host);
3505 return rc;
3506}
3507
9b646972 3508void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
3509{
3510 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3511 oplock_break);
a5e18bc3 3512 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 3513 struct cifsInodeInfo *cinode = CIFS_I(inode);
95a3f2f3 3514 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
eb4b756b 3515 int rc = 0;
3bc303c2
JL
3516
3517 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 3518 if (cinode->clientCanCacheRead)
8737c930 3519 break_lease(inode, O_RDONLY);
d54ff732 3520 else
8737c930 3521 break_lease(inode, O_WRONLY);
3bc303c2
JL
3522 rc = filemap_fdatawrite(inode->i_mapping);
3523 if (cinode->clientCanCacheRead == 0) {
eb4b756b
JL
3524 rc = filemap_fdatawait(inode->i_mapping);
3525 mapping_set_error(inode->i_mapping, rc);
3bc303c2
JL
3526 invalidate_remote_inode(inode);
3527 }
b6b38f70 3528 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
3529 }
3530
85160e03
PS
3531 rc = cifs_push_locks(cfile);
3532 if (rc)
3533 cERROR(1, "Push locks rc = %d", rc);
3534
3bc303c2
JL
3535 /*
3536 * releasing stale oplock after recent reconnect of smb session using
3537 * a now incorrect file handle is not a data integrity issue but do
3538 * not bother sending an oplock release if session to server still is
3539 * disconnected since oplock already released by the server
3540 */
cdff08e7 3541 if (!cfile->oplock_break_cancelled) {
95a3f2f3
PS
3542 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3543 cinode);
b6b38f70 3544 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 3545 }
3bc303c2
JL
3546}
3547
f5e54d6e 3548const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
3549 .readpage = cifs_readpage,
3550 .readpages = cifs_readpages,
3551 .writepage = cifs_writepage,
37c0eb46 3552 .writepages = cifs_writepages,
d9414774
NP
3553 .write_begin = cifs_write_begin,
3554 .write_end = cifs_write_end,
1da177e4 3555 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
3556 .releasepage = cifs_release_page,
3557 .invalidatepage = cifs_invalidate_page,
9ad1506b 3558 .launder_page = cifs_launder_page,
1da177e4 3559};
273d81d6
DK
3560
3561/*
3562 * cifs_readpages requires the server to support a buffer large enough to
3563 * contain the header plus one complete page of data. Otherwise, we need
3564 * to leave cifs_readpages out of the address space operations.
3565 */
f5e54d6e 3566const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
3567 .readpage = cifs_readpage,
3568 .writepage = cifs_writepage,
3569 .writepages = cifs_writepages,
d9414774
NP
3570 .write_begin = cifs_write_begin,
3571 .write_end = cifs_write_end,
273d81d6 3572 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
3573 .releasepage = cifs_release_page,
3574 .invalidatepage = cifs_invalidate_page,
9ad1506b 3575 .launder_page = cifs_launder_page,
273d81d6 3576};
This page took 0.656537 seconds and 5 git commands to generate.