[CIFS] don't explicitly do a FindClose on rewind when directory search has ended
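This is the annotated (blame) listing of fs/cifs/file.c as of the commit named above, which avoids issuing an explicit FindClose once a directory search has already ended. The matching guard in cifs_closedir further down only calls CIFSFindClose while the search is still incomplete and the handle is still valid (excerpt reproduced from the listing below):

	if (!pCFileStruct->srch_inf.endOfSearch &&
	    !pCFileStruct->invalidHandle) {
		pCFileStruct->invalidHandle = true;
		rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
	}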
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2007
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/writeback.h>
31#include <linux/task_io_accounting_ops.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 mutex_init(&private_data->lock_mutex);
51 INIT_LIST_HEAD(&private_data->llist);
52 private_data->pfile = file; /* needed for writepage */
53 private_data->pInode = inode;
54 private_data->invalidHandle = false;
55 private_data->closePend = false;
56 /* we have to track num writers to the inode, since writepages
57 does not tell us which handle the write is for so there can
58 be a close (overlapping with write) of the filehandle that
59 cifs_writepages chose to use */
60 atomic_set(&private_data->wrtPending, 0);
61
62 return private_data;
63}
64
65static inline int cifs_convert_flags(unsigned int flags)
66{
67 if ((flags & O_ACCMODE) == O_RDONLY)
68 return GENERIC_READ;
69 else if ((flags & O_ACCMODE) == O_WRONLY)
70 return GENERIC_WRITE;
71 else if ((flags & O_ACCMODE) == O_RDWR) {
72 /* GENERIC_ALL is too much permission to request
73 can cause unnecessary access denied on create */
74 /* return GENERIC_ALL; */
75 return (GENERIC_READ | GENERIC_WRITE);
76 }
77
78 return 0x20197;
79}
80
81static inline int cifs_get_disposition(unsigned int flags)
82{
83 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
84 return FILE_CREATE;
85 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
86 return FILE_OVERWRITE_IF;
87 else if ((flags & O_CREAT) == O_CREAT)
88 return FILE_OPEN_IF;
89 else if ((flags & O_TRUNC) == O_TRUNC)
90 return FILE_OVERWRITE;
91 else
92 return FILE_OPEN;
93}
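cifs_convert_flags() and cifs_get_disposition() above translate the POSIX open(2) flags into the SMB desired-access mask and create disposition that cifs_open() sends to the server (the mapping is repeated in a comment table inside cifs_open() below). The following stand-alone user-space sketch is not part of this kernel file; it simply mirrors the disposition mapping so the table can be printed and checked, echoing the FILE_* names only as strings for illustration:

/* Illustrative user-space sketch only: re-implements the flag-to-disposition
 * mapping of cifs_get_disposition() above; no kernel definitions are used. */
#include <fcntl.h>
#include <stdio.h>

static const char *smb_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return "FILE_CREATE";
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return "FILE_OVERWRITE_IF";
	else if ((flags & O_CREAT) == O_CREAT)
		return "FILE_OPEN_IF";
	else if ((flags & O_TRUNC) == O_TRUNC)
		return "FILE_OVERWRITE";
	else
		return "FILE_OPEN";
}

int main(void)
{
	printf("O_CREAT|O_EXCL  -> %s\n", smb_disposition(O_CREAT | O_EXCL));
	printf("O_CREAT|O_TRUNC -> %s\n", smb_disposition(O_CREAT | O_TRUNC));
	printf("O_CREAT         -> %s\n", smb_disposition(O_CREAT));
	printf("O_TRUNC         -> %s\n", smb_disposition(O_TRUNC));
	printf("none            -> %s\n", smb_disposition(0));
	return 0;
}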
94
95/* all arguments to this function must be checked for validity in caller */
96static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
97 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
98 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
99 char *full_path, int xid)
100{
101 struct timespec temp;
102 int rc;
103
104 /* want handles we can use to read with first
105 in the list so we do not have to walk the
106 list to search for one in prepare_write */
107 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
108 list_add_tail(&pCifsFile->flist,
109 &pCifsInode->openFileList);
110 } else {
111 list_add(&pCifsFile->flist,
112 &pCifsInode->openFileList);
113 }
114 write_unlock(&GlobalSMBSeslock);
115 if (pCifsInode->clientCanCacheRead) {
116 /* we have the inode open somewhere else
117 no need to discard cache data */
118 goto client_can_cache;
119 }
120
121 /* BB need same check in cifs_create too? */
122 /* if not oplocked, invalidate inode pages if mtime or file
123 size changed */
124 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
125 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
126 (file->f_path.dentry->d_inode->i_size ==
127 (loff_t)le64_to_cpu(buf->EndOfFile))) {
128 cFYI(1, ("inode unchanged on server"));
129 } else {
130 if (file->f_path.dentry->d_inode->i_mapping) {
131 /* BB no need to lock inode until after invalidate
132 since namei code should already have it locked? */
133 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
134 if (rc != 0)
135 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
136 }
137 cFYI(1, ("invalidating remote inode since open detected it "
138 "changed"));
139 invalidate_remote_inode(file->f_path.dentry->d_inode);
140 }
141
142client_can_cache:
143 if (pTcon->unix_ext)
144 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
145 full_path, inode->i_sb, xid);
146 else
147 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
148 full_path, buf, inode->i_sb, xid, NULL);
149
150 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
151 pCifsInode->clientCanCacheAll = true;
152 pCifsInode->clientCanCacheRead = true;
153 cFYI(1, ("Exclusive Oplock granted on inode %p",
154 file->f_path.dentry->d_inode));
155 } else if ((*oplock & 0xF) == OPLOCK_READ)
156 pCifsInode->clientCanCacheRead = true;
157
158 return rc;
159}
160
161int cifs_open(struct inode *inode, struct file *file)
162{
163 int rc = -EACCES;
164 int xid, oplock;
165 struct cifs_sb_info *cifs_sb;
166 struct cifsTconInfo *pTcon;
167 struct cifsFileInfo *pCifsFile;
168 struct cifsInodeInfo *pCifsInode;
169 struct list_head *tmp;
170 char *full_path = NULL;
171 int desiredAccess;
172 int disposition;
173 __u16 netfid;
174 FILE_ALL_INFO *buf = NULL;
175
176 xid = GetXid();
177
178 cifs_sb = CIFS_SB(inode->i_sb);
179 pTcon = cifs_sb->tcon;
180
181 if (file->f_flags & O_CREAT) {
182 /* search inode for this file and fill in file->private_data */
183 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
184 read_lock(&GlobalSMBSeslock);
185 list_for_each(tmp, &pCifsInode->openFileList) {
186 pCifsFile = list_entry(tmp, struct cifsFileInfo,
187 flist);
188 if ((pCifsFile->pfile == NULL) &&
189 (pCifsFile->pid == current->tgid)) {
190 /* mode set in cifs_create */
191
192 /* needed for writepage */
193 pCifsFile->pfile = file;
194
195 file->private_data = pCifsFile;
196 break;
197 }
198 }
199 read_unlock(&GlobalSMBSeslock);
200 if (file->private_data != NULL) {
201 rc = 0;
202 FreeXid(xid);
203 return rc;
204 } else {
205 if (file->f_flags & O_EXCL)
206 cERROR(1, ("could not find file instance for "
207 "new file %p", file));
208 }
209 }
210
211 full_path = build_path_from_dentry(file->f_path.dentry);
212 if (full_path == NULL) {
213 FreeXid(xid);
214 return -ENOMEM;
215 }
216
217 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
218 inode, file->f_flags, full_path));
219 desiredAccess = cifs_convert_flags(file->f_flags);
220
221/*********************************************************************
222 * open flag mapping table:
223 *
224 * POSIX Flag CIFS Disposition
225 * ---------- ----------------
226 * O_CREAT FILE_OPEN_IF
227 * O_CREAT | O_EXCL FILE_CREATE
228 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
229 * O_TRUNC FILE_OVERWRITE
230 * none of the above FILE_OPEN
231 *
232 * Note that there is not a direct match between disposition
233 * FILE_SUPERSEDE (ie create whether or not file exists although
234 * O_CREAT | O_TRUNC is similar but truncates the existing
235 * file rather than creating a new file as FILE_SUPERSEDE does
236 * (which uses the attributes / metadata passed in on open call)
237 *?
238 *? O_SYNC is a reasonable match to CIFS writethrough flag
239 *? and the read write flags match reasonably. O_LARGEFILE
240 *? is irrelevant because largefile support is always used
241 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
242 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
243 *********************************************************************/
244
245 disposition = cifs_get_disposition(file->f_flags);
246
247 if (oplockEnabled)
248 oplock = REQ_OPLOCK;
249 else
250 oplock = 0;
251
252 /* BB pass O_SYNC flag through on file attributes .. BB */
253
254 /* Also refresh inode by passing in file_info buf returned by SMBOpen
255 and calling get_inode_info with returned buf (at least helps
256 non-Unix server case) */
257
258 /* BB we can not do this if this is the second open of a file
259 and the first handle has writebehind data, we might be
260 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
261 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
262 if (!buf) {
263 rc = -ENOMEM;
264 goto out;
265 }
266
267 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
268 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
269 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
270 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
271 & CIFS_MOUNT_MAP_SPECIAL_CHR);
272 else
273 rc = -EIO; /* no NT SMB support fall into legacy open below */
274
275 if (rc == -EIO) {
276 /* Old server, try legacy style OpenX */
277 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
278 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
279 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
280 & CIFS_MOUNT_MAP_SPECIAL_CHR);
281 }
282 if (rc) {
283 cFYI(1, ("cifs_open returned 0x%x", rc));
284 goto out;
285 }
286 file->private_data =
287 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
288 if (file->private_data == NULL) {
289 rc = -ENOMEM;
290 goto out;
291 }
292 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
293 write_lock(&GlobalSMBSeslock);
294 list_add(&pCifsFile->tlist, &pTcon->openFileList);
295
296 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
297 if (pCifsInode) {
298 rc = cifs_open_inode_helper(inode, file, pCifsInode,
299 pCifsFile, pTcon,
300 &oplock, buf, full_path, xid);
301 } else {
302 write_unlock(&GlobalSMBSeslock);
303 }
304
305 if (oplock & CIFS_CREATE_ACTION) {
306 /* time to set mode which we can not set earlier due to
307 problems creating new read-only files */
308 if (pTcon->unix_ext) {
309 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
310 inode->i_mode,
311 (__u64)-1, (__u64)-1, 0 /* dev */,
312 cifs_sb->local_nls,
313 cifs_sb->mnt_cifs_flags &
314 CIFS_MOUNT_MAP_SPECIAL_CHR);
315 } else {
316 /* BB implement via Windows security descriptors eg
317 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
318 -1, -1, local_nls);
319 in the meantime could set r/o dos attribute when
320 perms are eg: mode & 0222 == 0 */
321 }
322 }
323
324out:
325 kfree(buf);
326 kfree(full_path);
327 FreeXid(xid);
328 return rc;
329}
330
331/* Try to reacquire byte range locks that were released when session */
332/* to server was lost */
333static int cifs_relock_file(struct cifsFileInfo *cifsFile)
334{
335 int rc = 0;
336
337/* BB list all locks open on this file and relock */
338
339 return rc;
340}
341
342static int cifs_reopen_file(struct file *file, bool can_flush)
343{
344 int rc = -EACCES;
345 int xid, oplock;
346 struct cifs_sb_info *cifs_sb;
347 struct cifsTconInfo *pTcon;
348 struct cifsFileInfo *pCifsFile;
349 struct cifsInodeInfo *pCifsInode;
350 struct inode *inode;
351 char *full_path = NULL;
352 int desiredAccess;
353 int disposition = FILE_OPEN;
354 __u16 netfid;
355
356 if (file->private_data)
357 pCifsFile = (struct cifsFileInfo *)file->private_data;
358 else
359 return -EBADF;
360
361 xid = GetXid();
362 down(&pCifsFile->fh_sem);
363 if (!pCifsFile->invalidHandle) {
364 up(&pCifsFile->fh_sem);
365 FreeXid(xid);
366 return 0;
367 }
368
369 if (file->f_path.dentry == NULL) {
370 cERROR(1, ("no valid name if dentry freed"));
371 dump_stack();
372 rc = -EBADF;
373 goto reopen_error_exit;
374 }
375
376 inode = file->f_path.dentry->d_inode;
377 if (inode == NULL) {
378 cERROR(1, ("inode not valid"));
379 dump_stack();
380 rc = -EBADF;
381 goto reopen_error_exit;
382 }
383
384 cifs_sb = CIFS_SB(inode->i_sb);
385 pTcon = cifs_sb->tcon;
386
387/* can not grab rename sem here because various ops, including
388 those that already have the rename sem can end up causing writepage
389 to get called and if the server was down that means we end up here,
390 and we can never tell if the caller already has the rename_sem */
391 full_path = build_path_from_dentry(file->f_path.dentry);
392 if (full_path == NULL) {
393 rc = -ENOMEM;
394reopen_error_exit:
395 up(&pCifsFile->fh_sem);
396 FreeXid(xid);
397 return rc;
398 }
399
400 cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
401 inode, file->f_flags, full_path));
402 desiredAccess = cifs_convert_flags(file->f_flags);
403
404 if (oplockEnabled)
405 oplock = REQ_OPLOCK;
406 else
407 oplock = 0;
408
409 /* Can not refresh inode by passing in file_info buf to be returned
410 by SMBOpen and then calling get_inode_info with returned buf
411 since file might have write behind data that needs to be flushed
412 and server version of file size can be stale. If we knew for sure
413 that inode was not dirty locally we could do this */
414
415 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
416 CREATE_NOT_DIR, &netfid, &oplock, NULL,
417 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
418 CIFS_MOUNT_MAP_SPECIAL_CHR);
419 if (rc) {
420 up(&pCifsFile->fh_sem);
421 cFYI(1, ("cifs_open returned 0x%x", rc));
422 cFYI(1, ("oplock: %d", oplock));
423 } else {
424 pCifsFile->netfid = netfid;
425 pCifsFile->invalidHandle = false;
426 up(&pCifsFile->fh_sem);
427 pCifsInode = CIFS_I(inode);
428 if (pCifsInode) {
429 if (can_flush) {
430 rc = filemap_write_and_wait(inode->i_mapping);
431 if (rc != 0)
432 CIFS_I(inode)->write_behind_rc = rc;
433 /* temporarily disable caching while we
434 go to server to get inode info */
435 pCifsInode->clientCanCacheAll = false;
436 pCifsInode->clientCanCacheRead = false;
437 if (pTcon->unix_ext)
438 rc = cifs_get_inode_info_unix(&inode,
439 full_path, inode->i_sb, xid);
440 else
441 rc = cifs_get_inode_info(&inode,
442 full_path, NULL, inode->i_sb,
443 xid, NULL);
444 } /* else we are writing out data to server already
445 and could deadlock if we tried to flush data, and
446 since we do not know if we have data that would
447 invalidate the current end of file on the server
448 we can not go to the server to get the new inod
449 info */
450 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
451 pCifsInode->clientCanCacheAll = true;
452 pCifsInode->clientCanCacheRead = true;
453 cFYI(1, ("Exclusive Oplock granted on inode %p",
454 file->f_path.dentry->d_inode));
455 } else if ((oplock & 0xF) == OPLOCK_READ) {
456 pCifsInode->clientCanCacheRead = true;
457 pCifsInode->clientCanCacheAll = false;
458 } else {
459 pCifsInode->clientCanCacheRead = false;
460 pCifsInode->clientCanCacheAll = false;
461 }
462 cifs_relock_file(pCifsFile);
463 }
464 }
465
466 kfree(full_path);
467 FreeXid(xid);
468 return rc;
469}
470
471int cifs_close(struct inode *inode, struct file *file)
472{
473 int rc = 0;
474 int xid, timeout;
475 struct cifs_sb_info *cifs_sb;
476 struct cifsTconInfo *pTcon;
477 struct cifsFileInfo *pSMBFile =
478 (struct cifsFileInfo *)file->private_data;
479
480 xid = GetXid();
481
482 cifs_sb = CIFS_SB(inode->i_sb);
483 pTcon = cifs_sb->tcon;
484 if (pSMBFile) {
485 struct cifsLockInfo *li, *tmp;
486
487 pSMBFile->closePend = true;
488 if (pTcon) {
489 /* no sense reconnecting to close a file that is
490 already closed */
491 if (pTcon->tidStatus != CifsNeedReconnect) {
492 timeout = 2;
493 while ((atomic_read(&pSMBFile->wrtPending) != 0)
494 && (timeout <= 2048)) {
495 /* Give write a better chance to get to
496 server ahead of the close. We do not
497 want to add a wait_q here as it would
498 increase the memory utilization as
499 the struct would be in each open file,
500 but this should give enough time to
501 clear the socket */
502 cFYI(DBG2,
503 ("close delay, write pending"));
504 msleep(timeout);
505 timeout *= 4;
506 }
507 if (atomic_read(&pSMBFile->wrtPending))
508 cERROR(1,
509 ("close with pending writes"));
510 rc = CIFSSMBClose(xid, pTcon,
511 pSMBFile->netfid);
512 }
513 }
514
515 /* Delete any outstanding lock records.
516 We'll lose them when the file is closed anyway. */
517 mutex_lock(&pSMBFile->lock_mutex);
518 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
519 list_del(&li->llist);
520 kfree(li);
521 }
522 mutex_unlock(&pSMBFile->lock_mutex);
523
524 write_lock(&GlobalSMBSeslock);
525 list_del(&pSMBFile->flist);
526 list_del(&pSMBFile->tlist);
527 write_unlock(&GlobalSMBSeslock);
528 timeout = 10;
529 /* We waited above to give the SMBWrite a chance to issue
530 on the wire (so we do not get SMBWrite returning EBADF
531 if writepages is racing with close. Note that writepages
532 does not specify a file handle, so it is possible for a file
533 to be opened twice, and the application close the "wrong"
534 file handle - in these cases we delay long enough to allow
535 the SMBWrite to get on the wire before the SMB Close.
536 We allow total wait here over 45 seconds, more than
537 oplock break time, and more than enough to allow any write
538 to complete on the server, or to time out on the client */
539 while ((atomic_read(&pSMBFile->wrtPending) != 0)
540 && (timeout <= 50000)) {
541 cERROR(1, ("writes pending, delay free of handle"));
542 msleep(timeout);
543 timeout *= 8;
544 }
545 kfree(pSMBFile->search_resume_name);
546 kfree(file->private_data);
547 file->private_data = NULL;
548 } else
549 rc = -EBADF;
550
551 read_lock(&GlobalSMBSeslock);
552 if (list_empty(&(CIFS_I(inode)->openFileList))) {
553 cFYI(1, ("closing last open instance for inode %p", inode));
554 /* if the file is not open we do not know if we can cache info
555 on this inode, much less write behind and read ahead */
556 CIFS_I(inode)->clientCanCacheRead = false;
557 CIFS_I(inode)->clientCanCacheAll = false;
558 }
559 read_unlock(&GlobalSMBSeslock);
560 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
561 rc = CIFS_I(inode)->write_behind_rc;
562 FreeXid(xid);
563 return rc;
564}
565
566int cifs_closedir(struct inode *inode, struct file *file)
567{
568 int rc = 0;
569 int xid;
570 struct cifsFileInfo *pCFileStruct =
571 (struct cifsFileInfo *)file->private_data;
572 char *ptmp;
573
574 cFYI(1, ("Closedir inode = 0x%p", inode));
575
576 xid = GetXid();
577
578 if (pCFileStruct) {
579 struct cifsTconInfo *pTcon;
580 struct cifs_sb_info *cifs_sb =
581 CIFS_SB(file->f_path.dentry->d_sb);
582
583 pTcon = cifs_sb->tcon;
584
585 cFYI(1, ("Freeing private data in close dir"));
586 if (!pCFileStruct->srch_inf.endOfSearch &&
587 !pCFileStruct->invalidHandle) {
588 pCFileStruct->invalidHandle = true;
589 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
590 cFYI(1, ("Closing uncompleted readdir with rc %d",
591 rc));
592 /* not much we can do if it fails anyway, ignore rc */
593 rc = 0;
594 }
595 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
596 if (ptmp) {
597 cFYI(1, ("closedir free smb buf in srch struct"));
598 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
599 if (pCFileStruct->srch_inf.smallBuf)
600 cifs_small_buf_release(ptmp);
601 else
602 cifs_buf_release(ptmp);
603 }
604 ptmp = pCFileStruct->search_resume_name;
605 if (ptmp) {
606 cFYI(1, ("closedir free resume name"));
607 pCFileStruct->search_resume_name = NULL;
608 kfree(ptmp);
609 }
610 kfree(file->private_data);
611 file->private_data = NULL;
612 }
613 /* BB can we lock the filestruct while this is going on? */
614 FreeXid(xid);
615 return rc;
616}
617
618static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
619 __u64 offset, __u8 lockType)
620{
621 struct cifsLockInfo *li =
622 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
623 if (li == NULL)
624 return -ENOMEM;
625 li->offset = offset;
626 li->length = len;
627 li->type = lockType;
628 mutex_lock(&fid->lock_mutex);
629 list_add(&li->llist, &fid->llist);
630 mutex_unlock(&fid->lock_mutex);
631 return 0;
632}
633
634int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
635{
636 int rc, xid;
637 __u32 numLock = 0;
638 __u32 numUnlock = 0;
639 __u64 length;
640 bool wait_flag = false;
641 struct cifs_sb_info *cifs_sb;
642 struct cifsTconInfo *pTcon;
643 __u16 netfid;
644 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
645 bool posix_locking;
646
647 length = 1 + pfLock->fl_end - pfLock->fl_start;
648 rc = -EACCES;
649 xid = GetXid();
650
651 cFYI(1, ("Lock parm: 0x%x flockflags: "
652 "0x%x flocktype: 0x%x start: %lld end: %lld",
653 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
654 pfLock->fl_end));
655
656 if (pfLock->fl_flags & FL_POSIX)
657 cFYI(1, ("Posix"));
658 if (pfLock->fl_flags & FL_FLOCK)
659 cFYI(1, ("Flock"));
660 if (pfLock->fl_flags & FL_SLEEP) {
661 cFYI(1, ("Blocking lock"));
662 wait_flag = true;
663 }
664 if (pfLock->fl_flags & FL_ACCESS)
665 cFYI(1, ("Process suspended by mandatory locking - "
666 "not implemented yet"));
667 if (pfLock->fl_flags & FL_LEASE)
668 cFYI(1, ("Lease on file - not implemented yet"));
669 if (pfLock->fl_flags &
670 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
671 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
672
673 if (pfLock->fl_type == F_WRLCK) {
674 cFYI(1, ("F_WRLCK "));
675 numLock = 1;
676 } else if (pfLock->fl_type == F_UNLCK) {
677 cFYI(1, ("F_UNLCK"));
678 numUnlock = 1;
679 /* Check if unlock includes more than
680 one lock range */
681 } else if (pfLock->fl_type == F_RDLCK) {
682 cFYI(1, ("F_RDLCK"));
683 lockType |= LOCKING_ANDX_SHARED_LOCK;
684 numLock = 1;
685 } else if (pfLock->fl_type == F_EXLCK) {
686 cFYI(1, ("F_EXLCK"));
687 numLock = 1;
688 } else if (pfLock->fl_type == F_SHLCK) {
689 cFYI(1, ("F_SHLCK"));
690 lockType |= LOCKING_ANDX_SHARED_LOCK;
691 numLock = 1;
692 } else
693 cFYI(1, ("Unknown type of lock"));
694
695 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
696 pTcon = cifs_sb->tcon;
697
698 if (file->private_data == NULL) {
699 FreeXid(xid);
700 return -EBADF;
701 }
702 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
703
704 posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
705 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
706
707 /* BB add code here to normalize offset and length to
708 account for negative length which we can not accept over the
709 wire */
710 if (IS_GETLK(cmd)) {
711 if (posix_locking) {
712 int posix_lock_type;
713 if (lockType & LOCKING_ANDX_SHARED_LOCK)
714 posix_lock_type = CIFS_RDLCK;
715 else
716 posix_lock_type = CIFS_WRLCK;
717 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
718 length, pfLock,
719 posix_lock_type, wait_flag);
720 FreeXid(xid);
721 return rc;
722 }
723
724 /* BB we could chain these into one lock request BB */
725 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
726 0, 1, lockType, 0 /* wait flag */ );
727 if (rc == 0) {
728 rc = CIFSSMBLock(xid, pTcon, netfid, length,
729 pfLock->fl_start, 1 /* numUnlock */ ,
730 0 /* numLock */ , lockType,
731 0 /* wait flag */ );
732 pfLock->fl_type = F_UNLCK;
733 if (rc != 0)
734 cERROR(1, ("Error unlocking previously locked "
735 "range %d during test of lock", rc));
736 rc = 0;
737
738 } else {
739 /* if rc == ERR_SHARING_VIOLATION ? */
740 rc = 0; /* do not change lock type to unlock
741 since range in use */
742 }
743
744 FreeXid(xid);
745 return rc;
746 }
747
748 if (!numLock && !numUnlock) {
749 /* if no lock or unlock then nothing
750 to do since we do not know what it is */
751 FreeXid(xid);
752 return -EOPNOTSUPP;
753 }
754
755 if (posix_locking) {
756 int posix_lock_type;
757 if (lockType & LOCKING_ANDX_SHARED_LOCK)
758 posix_lock_type = CIFS_RDLCK;
759 else
760 posix_lock_type = CIFS_WRLCK;
761
762 if (numUnlock == 1)
763 posix_lock_type = CIFS_UNLCK;
764
765 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
766 length, pfLock,
767 posix_lock_type, wait_flag);
768 } else {
769 struct cifsFileInfo *fid =
770 (struct cifsFileInfo *)file->private_data;
771
772 if (numLock) {
773 rc = CIFSSMBLock(xid, pTcon, netfid, length,
774 pfLock->fl_start,
775 0, numLock, lockType, wait_flag);
776
777 if (rc == 0) {
778 /* For Windows locks we must store them. */
779 rc = store_file_lock(fid, length,
780 pfLock->fl_start, lockType);
781 }
782 } else if (numUnlock) {
783 /* For each stored lock that this unlock overlaps
784 completely, unlock it. */
785 int stored_rc = 0;
786 struct cifsLockInfo *li, *tmp;
787
788 rc = 0;
789 mutex_lock(&fid->lock_mutex);
790 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
791 if (pfLock->fl_start <= li->offset &&
792 (pfLock->fl_start + length) >=
793 (li->offset + li->length)) {
794 stored_rc = CIFSSMBLock(xid, pTcon,
795 netfid,
796 li->length, li->offset,
797 1, 0, li->type, false);
798 if (stored_rc)
799 rc = stored_rc;
800
801 list_del(&li->llist);
802 kfree(li);
803 }
804 }
805 mutex_unlock(&fid->lock_mutex);
806 }
807 }
808
809 if (pfLock->fl_flags & FL_POSIX)
810 posix_lock_file_wait(file, pfLock);
811 FreeXid(xid);
812 return rc;
813}
814
815ssize_t cifs_user_write(struct file *file, const char __user *write_data,
816 size_t write_size, loff_t *poffset)
817{
818 int rc = 0;
819 unsigned int bytes_written = 0;
820 unsigned int total_written;
821 struct cifs_sb_info *cifs_sb;
822 struct cifsTconInfo *pTcon;
823 int xid, long_op;
824 struct cifsFileInfo *open_file;
825
e6a00296 826 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
827
828 pTcon = cifs_sb->tcon;
829
830 /* cFYI(1,
831 (" write %d bytes to offset %lld of %s", write_size,
e6a00296 832 *poffset, file->f_path.dentry->d_name.name)); */
1da177e4
LT
833
834 if (file->private_data == NULL)
835 return -EBADF;
c33f8d32 836 open_file = (struct cifsFileInfo *) file->private_data;
50c2f753 837
1da177e4 838 xid = GetXid();
1da177e4 839
e6a00296 840 if (*poffset > file->f_path.dentry->d_inode->i_size)
133672ef 841 long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
1da177e4 842 else
133672ef 843 long_op = CIFS_LONG_OP;
1da177e4
LT
844
845 for (total_written = 0; write_size > total_written;
846 total_written += bytes_written) {
847 rc = -EAGAIN;
848 while (rc == -EAGAIN) {
849 if (file->private_data == NULL) {
850 /* file has been closed on us */
851 FreeXid(xid);
852 /* if we have gotten here we have written some data
853 and blocked, and the file has been freed on us while
854 we blocked so return what we managed to write */
855 return total_written;
fb8c4b14 856 }
1da177e4
LT
857 if (open_file->closePend) {
858 FreeXid(xid);
859 if (total_written)
860 return total_written;
861 else
862 return -EBADF;
863 }
864 if (open_file->invalidHandle) {
1da177e4
LT
865 /* we could deadlock if we called
866 filemap_fdatawait from here so tell
867 reopen_file not to flush data to server
868 now */
4b18f2a9 869 rc = cifs_reopen_file(file, false);
1da177e4
LT
870 if (rc != 0)
871 break;
872 }
873
874 rc = CIFSSMBWrite(xid, pTcon,
875 open_file->netfid,
876 min_t(const int, cifs_sb->wsize,
877 write_size - total_written),
878 *poffset, &bytes_written,
879 NULL, write_data + total_written, long_op);
880 }
881 if (rc || (bytes_written == 0)) {
882 if (total_written)
883 break;
884 else {
885 FreeXid(xid);
886 return rc;
887 }
888 } else
889 *poffset += bytes_written;
133672ef 890 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
891 15 seconds is plenty */
892 }
893
a4544347 894 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
895
896 /* since the write may have blocked check these pointers again */
3677db10
SF
897 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
898 struct inode *inode = file->f_path.dentry->d_inode;
fb8c4b14
SF
899/* Do not update local mtime - server will set its actual value on write
900 * inode->i_ctime = inode->i_mtime =
3677db10
SF
901 * current_fs_time(inode->i_sb);*/
902 if (total_written > 0) {
903 spin_lock(&inode->i_lock);
904 if (*poffset > file->f_path.dentry->d_inode->i_size)
905 i_size_write(file->f_path.dentry->d_inode,
1da177e4 906 *poffset);
3677db10 907 spin_unlock(&inode->i_lock);
1da177e4 908 }
fb8c4b14 909 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
910 }
911 FreeXid(xid);
912 return total_written;
913}
914
915static ssize_t cifs_write(struct file *file, const char *write_data,
916 size_t write_size, loff_t *poffset)
917{
918 int rc = 0;
919 unsigned int bytes_written = 0;
920 unsigned int total_written;
921 struct cifs_sb_info *cifs_sb;
922 struct cifsTconInfo *pTcon;
923 int xid, long_op;
924 struct cifsFileInfo *open_file;
925
e6a00296 926 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
927
928 pTcon = cifs_sb->tcon;
929
fb8c4b14 930 cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
e6a00296 931 *poffset, file->f_path.dentry->d_name.name));
1da177e4
LT
932
933 if (file->private_data == NULL)
934 return -EBADF;
c33f8d32 935 open_file = (struct cifsFileInfo *)file->private_data;
50c2f753 936
1da177e4 937 xid = GetXid();
1da177e4 938
e6a00296 939 if (*poffset > file->f_path.dentry->d_inode->i_size)
133672ef 940 long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
1da177e4 941 else
133672ef 942 long_op = CIFS_LONG_OP;
1da177e4
LT
943
944 for (total_written = 0; write_size > total_written;
945 total_written += bytes_written) {
946 rc = -EAGAIN;
947 while (rc == -EAGAIN) {
948 if (file->private_data == NULL) {
949 /* file has been closed on us */
950 FreeXid(xid);
951 /* if we have gotten here we have written some data
952 and blocked, and the file has been freed on us
fb8c4b14 953 while we blocked so return what we managed to
1da177e4
LT
954 write */
955 return total_written;
fb8c4b14 956 }
1da177e4
LT
957 if (open_file->closePend) {
958 FreeXid(xid);
959 if (total_written)
960 return total_written;
961 else
962 return -EBADF;
963 }
964 if (open_file->invalidHandle) {
1da177e4
LT
965 /* we could deadlock if we called
966 filemap_fdatawait from here so tell
fb8c4b14 967 reopen_file not to flush data to
1da177e4 968 server now */
4b18f2a9 969 rc = cifs_reopen_file(file, false);
1da177e4
LT
970 if (rc != 0)
971 break;
972 }
fb8c4b14
SF
973 if (experimEnabled || (pTcon->ses->server &&
974 ((pTcon->ses->server->secMode &
08775834 975 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 976 == 0))) {
3e84469d
SF
977 struct kvec iov[2];
978 unsigned int len;
979
0ae0efad 980 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
981 write_size - total_written);
982 /* iov[0] is reserved for smb header */
983 iov[1].iov_base = (char *)write_data +
984 total_written;
985 iov[1].iov_len = len;
d6e04ae6 986 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 987 open_file->netfid, len,
d6e04ae6 988 *poffset, &bytes_written,
3e84469d 989 iov, 1, long_op);
d6e04ae6 990 } else
60808233
SF
991 rc = CIFSSMBWrite(xid, pTcon,
992 open_file->netfid,
993 min_t(const int, cifs_sb->wsize,
994 write_size - total_written),
995 *poffset, &bytes_written,
996 write_data + total_written,
997 NULL, long_op);
1da177e4
LT
998 }
999 if (rc || (bytes_written == 0)) {
1000 if (total_written)
1001 break;
1002 else {
1003 FreeXid(xid);
1004 return rc;
1005 }
1006 } else
1007 *poffset += bytes_written;
133672ef 1008 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1009 15 seconds is plenty */
1010 }
1011
a4544347 1012 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1013
1014 /* since the write may have blocked check these pointers again */
3677db10 1015 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
004c46b9 1016/*BB We could make this contingent on superblock ATIME flag too */
3677db10
SF
1017/* file->f_path.dentry->d_inode->i_ctime =
1018 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1019 if (total_written > 0) {
1020 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1021 if (*poffset > file->f_path.dentry->d_inode->i_size)
1022 i_size_write(file->f_path.dentry->d_inode,
1023 *poffset);
1024 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1025 }
3677db10 1026 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1027 }
1028 FreeXid(xid);
1029 return total_written;
1030}
1031
630f3f0c
SF
1032#ifdef CONFIG_CIFS_EXPERIMENTAL
1033struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1034{
1035 struct cifsFileInfo *open_file = NULL;
1036
1037 read_lock(&GlobalSMBSeslock);
1038 /* we could simply get the first_list_entry since write-only entries
1039 are always at the end of the list but since the first entry might
1040 have a close pending, we go through the whole list */
1041 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1042 if (open_file->closePend)
1043 continue;
1044 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1045 (open_file->pfile->f_flags & O_RDONLY))) {
1046 if (!open_file->invalidHandle) {
1047 /* found a good file */
1048 /* lock it so it will not be closed on us */
1049 atomic_inc(&open_file->wrtPending);
1050 read_unlock(&GlobalSMBSeslock);
1051 return open_file;
1052 } /* else might as well continue, and look for
1053 another, or simply have the caller reopen it
1054 again rather than trying to fix this handle */
1055 } else /* write only file */
1056 break; /* write only files are last so must be done */
1057 }
1058 read_unlock(&GlobalSMBSeslock);
1059 return NULL;
1060}
1061#endif
1062
dd99cd80 1063struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
6148a742
SF
1064{
1065 struct cifsFileInfo *open_file;
dd99cd80 1066 int rc;
6148a742 1067
60808233
SF
1068 /* Having a null inode here (because mapping->host was set to zero by
1069 the VFS or MM) should not happen but we had reports of on oops (due to
1070 it being zero) during stress testcases so we need to check for it */
1071
fb8c4b14
SF
1072 if (cifs_inode == NULL) {
1073 cERROR(1, ("Null inode passed to cifs_writeable_file"));
60808233
SF
1074 dump_stack();
1075 return NULL;
1076 }
1077
6148a742 1078 read_lock(&GlobalSMBSeslock);
9b22b0b7 1079refind_writable:
6148a742
SF
1080 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1081 if (open_file->closePend)
1082 continue;
1083 if (open_file->pfile &&
1084 ((open_file->pfile->f_flags & O_RDWR) ||
1085 (open_file->pfile->f_flags & O_WRONLY))) {
23e7dd7d 1086 atomic_inc(&open_file->wrtPending);
9b22b0b7
SF
1087
1088 if (!open_file->invalidHandle) {
1089 /* found a good writable file */
1090 read_unlock(&GlobalSMBSeslock);
1091 return open_file;
1092 }
8840dee9 1093
6148a742 1094 read_unlock(&GlobalSMBSeslock);
9b22b0b7 1095 /* Had to unlock since following call can block */
4b18f2a9 1096 rc = cifs_reopen_file(open_file->pfile, false);
8840dee9 1097 if (!rc) {
9b22b0b7
SF
1098 if (!open_file->closePend)
1099 return open_file;
1100 else { /* start over in case this was deleted */
1101 /* since the list could be modified */
37c0eb46 1102 read_lock(&GlobalSMBSeslock);
15745320 1103 atomic_dec(&open_file->wrtPending);
9b22b0b7 1104 goto refind_writable;
37c0eb46
SF
1105 }
1106 }
9b22b0b7
SF
1107
1108 /* if it fails, try another handle if possible -
1109 (we can not do this if closePending since
1110 loop could be modified - in which case we
1111 have to start at the beginning of the list
1112 again. Note that it would be bad
1113 to hold up writepages here (rather than
1114 in caller) with continuous retries */
1115 cFYI(1, ("wp failed on reopen file"));
1116 read_lock(&GlobalSMBSeslock);
1117 /* can not use this handle, no write
1118 pending on this one after all */
1119 atomic_dec(&open_file->wrtPending);
8840dee9 1120
9b22b0b7
SF
1121 if (open_file->closePend) /* list could have changed */
1122 goto refind_writable;
1123 /* else we simply continue to the next entry. Thus
1124 we do not loop on reopen errors. If we
1125 can not reopen the file, for example if we
1126 reconnected to a server with another client
1127 racing to delete or lock the file we would not
1128 make progress if we restarted before the beginning
1129 of the loop here. */
6148a742
SF
1130 }
1131 }
1132 read_unlock(&GlobalSMBSeslock);
1133 return NULL;
1134}
1135
1da177e4
LT
1136static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1137{
1138 struct address_space *mapping = page->mapping;
1139 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1140 char *write_data;
1141 int rc = -EFAULT;
1142 int bytes_written = 0;
1143 struct cifs_sb_info *cifs_sb;
1144 struct cifsTconInfo *pTcon;
1145 struct inode *inode;
6148a742 1146 struct cifsFileInfo *open_file;
1da177e4
LT
1147
1148 if (!mapping || !mapping->host)
1149 return -EFAULT;
1150
1151 inode = page->mapping->host;
1152 cifs_sb = CIFS_SB(inode->i_sb);
1153 pTcon = cifs_sb->tcon;
1154
1155 offset += (loff_t)from;
1156 write_data = kmap(page);
1157 write_data += from;
1158
1159 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1160 kunmap(page);
1161 return -EIO;
1162 }
1163
1164 /* racing with truncate? */
1165 if (offset > mapping->host->i_size) {
1166 kunmap(page);
1167 return 0; /* don't care */
1168 }
1169
1170 /* check to make sure that we are not extending the file */
1171 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1172 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1173
6148a742
SF
1174 open_file = find_writable_file(CIFS_I(mapping->host));
1175 if (open_file) {
1176 bytes_written = cifs_write(open_file->pfile, write_data,
1177 to-from, &offset);
23e7dd7d 1178 atomic_dec(&open_file->wrtPending);
1da177e4 1179 /* Does mm or vfs already set times? */
6148a742 1180 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1181 if ((bytes_written > 0) && (offset))
6148a742 1182 rc = 0;
bb5a9a04
SF
1183 else if (bytes_written < 0)
1184 rc = bytes_written;
6148a742 1185 } else {
1da177e4
LT
1186 cFYI(1, ("No writeable filehandles for inode"));
1187 rc = -EIO;
1188 }
1189
1190 kunmap(page);
1191 return rc;
1192}
1193
1da177e4 1194static int cifs_writepages(struct address_space *mapping,
37c0eb46 1195 struct writeback_control *wbc)
1da177e4 1196{
37c0eb46
SF
1197 struct backing_dev_info *bdi = mapping->backing_dev_info;
1198 unsigned int bytes_to_write;
1199 unsigned int bytes_written;
1200 struct cifs_sb_info *cifs_sb;
1201 int done = 0;
111ebb6e 1202 pgoff_t end;
37c0eb46 1203 pgoff_t index;
fb8c4b14
SF
1204 int range_whole = 0;
1205 struct kvec *iov;
84d2f07e 1206 int len;
37c0eb46
SF
1207 int n_iov = 0;
1208 pgoff_t next;
1209 int nr_pages;
1210 __u64 offset = 0;
23e7dd7d 1211 struct cifsFileInfo *open_file;
37c0eb46
SF
1212 struct page *page;
1213 struct pagevec pvec;
1214 int rc = 0;
1215 int scanned = 0;
1da177e4
LT
1216 int xid;
1217
37c0eb46 1218 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1219
37c0eb46
SF
1220 /*
1221 * If wsize is smaller that the page cache size, default to writing
1222 * one page at a time via cifs_writepage
1223 */
1224 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1225 return generic_writepages(mapping, wbc);
1226
fb8c4b14
SF
1227 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1228 if (cifs_sb->tcon->ses->server->secMode &
1229 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1230 if (!experimEnabled)
60808233 1231 return generic_writepages(mapping, wbc);
4a77118c 1232
9a0c8230 1233 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1234 if (iov == NULL)
9a0c8230
SF
1235 return generic_writepages(mapping, wbc);
1236
1237
37c0eb46
SF
1238 /*
1239 * BB: Is this meaningful for a non-block-device file system?
1240 * If it is, we should test it again after we do I/O
1241 */
1242 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1243 wbc->encountered_congestion = 1;
9a0c8230 1244 kfree(iov);
37c0eb46
SF
1245 return 0;
1246 }
1247
1da177e4
LT
1248 xid = GetXid();
1249
37c0eb46 1250 pagevec_init(&pvec, 0);
111ebb6e 1251 if (wbc->range_cyclic) {
37c0eb46 1252 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1253 end = -1;
1254 } else {
1255 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1256 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1257 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1258 range_whole = 1;
37c0eb46
SF
1259 scanned = 1;
1260 }
1261retry:
1262 while (!done && (index <= end) &&
1263 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1264 PAGECACHE_TAG_DIRTY,
1265 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1266 int first;
1267 unsigned int i;
1268
37c0eb46
SF
1269 first = -1;
1270 next = 0;
1271 n_iov = 0;
1272 bytes_to_write = 0;
1273
1274 for (i = 0; i < nr_pages; i++) {
1275 page = pvec.pages[i];
1276 /*
1277 * At this point we hold neither mapping->tree_lock nor
1278 * lock on the page itself: the page may be truncated or
1279 * invalidated (changing page->mapping to NULL), or even
1280 * swizzled back from swapper_space to tmpfs file
1281 * mapping
1282 */
1283
1284 if (first < 0)
1285 lock_page(page);
1286 else if (TestSetPageLocked(page))
1287 break;
1288
1289 if (unlikely(page->mapping != mapping)) {
1290 unlock_page(page);
1291 break;
1292 }
1293
111ebb6e 1294 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1295 done = 1;
1296 unlock_page(page);
1297 break;
1298 }
1299
1300 if (next && (page->index != next)) {
1301 /* Not next consecutive page */
1302 unlock_page(page);
1303 break;
1304 }
1305
1306 if (wbc->sync_mode != WB_SYNC_NONE)
1307 wait_on_page_writeback(page);
1308
1309 if (PageWriteback(page) ||
cb876f45 1310 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1311 unlock_page(page);
1312 break;
1313 }
84d2f07e 1314
cb876f45
LT
1315 /*
1316 * This actually clears the dirty bit in the radix tree.
1317 * See cifs_writepage() for more commentary.
1318 */
1319 set_page_writeback(page);
1320
84d2f07e
SF
1321 if (page_offset(page) >= mapping->host->i_size) {
1322 done = 1;
1323 unlock_page(page);
cb876f45 1324 end_page_writeback(page);
84d2f07e
SF
1325 break;
1326 }
1327
37c0eb46
SF
1328 /*
1329 * BB can we get rid of this? pages are held by pvec
1330 */
1331 page_cache_get(page);
1332
84d2f07e
SF
1333 len = min(mapping->host->i_size - page_offset(page),
1334 (loff_t)PAGE_CACHE_SIZE);
1335
37c0eb46
SF
1336 /* reserve iov[0] for the smb header */
1337 n_iov++;
1338 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1339 iov[n_iov].iov_len = len;
1340 bytes_to_write += len;
37c0eb46
SF
1341
1342 if (first < 0) {
1343 first = i;
1344 offset = page_offset(page);
1345 }
1346 next = page->index + 1;
1347 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1348 break;
1349 }
1350 if (n_iov) {
23e7dd7d
SF
1351 /* Search for a writable handle every time we call
1352 * CIFSSMBWrite2. We can't rely on the last handle
1353 * we used to still be valid
1354 */
1355 open_file = find_writable_file(CIFS_I(mapping->host));
1356 if (!open_file) {
1357 cERROR(1, ("No writable handles for inode"));
1358 rc = -EBADF;
1047abc1 1359 } else {
23e7dd7d
SF
1360 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1361 open_file->netfid,
1362 bytes_to_write, offset,
1363 &bytes_written, iov, n_iov,
133672ef 1364 CIFS_LONG_OP);
23e7dd7d
SF
1365 atomic_dec(&open_file->wrtPending);
1366 if (rc || bytes_written < bytes_to_write) {
63135e08 1367 cERROR(1, ("Write2 ret %d, wrote %d",
23e7dd7d
SF
1368 rc, bytes_written));
1369 /* BB what if continued retry is
1370 requested via mount flags? */
cea21805
JL
1371 if (rc == -ENOSPC)
1372 set_bit(AS_ENOSPC, &mapping->flags);
1373 else
1374 set_bit(AS_EIO, &mapping->flags);
23e7dd7d
SF
1375 } else {
1376 cifs_stats_bytes_written(cifs_sb->tcon,
1377 bytes_written);
1378 }
37c0eb46
SF
1379 }
1380 for (i = 0; i < n_iov; i++) {
1381 page = pvec.pages[first + i];
eb9bdaa3
SF
1382 /* Should we also set page error on
1383 success rc but too little data written? */
1384 /* BB investigate retry logic on temporary
1385 server crash cases and how recovery works
fb8c4b14
SF
1386 when page marked as error */
1387 if (rc)
eb9bdaa3 1388 SetPageError(page);
37c0eb46
SF
1389 kunmap(page);
1390 unlock_page(page);
cb876f45 1391 end_page_writeback(page);
37c0eb46
SF
1392 page_cache_release(page);
1393 }
1394 if ((wbc->nr_to_write -= n_iov) <= 0)
1395 done = 1;
1396 index = next;
1397 }
1398 pagevec_release(&pvec);
1399 }
1400 if (!scanned && !done) {
1401 /*
1402 * We hit the last page and there is more work to be done: wrap
1403 * back to the start of the file
1404 */
1405 scanned = 1;
1406 index = 0;
1407 goto retry;
1408 }
111ebb6e 1409 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1410 mapping->writeback_index = index;
1411
1da177e4 1412 FreeXid(xid);
9a0c8230 1413 kfree(iov);
1da177e4
LT
1414 return rc;
1415}
1da177e4 1416
fb8c4b14 1417static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1418{
1419 int rc = -EFAULT;
1420 int xid;
1421
1422 xid = GetXid();
1423/* BB add check for wbc flags */
1424 page_cache_get(page);
ad7a2926 1425 if (!PageUptodate(page))
1da177e4 1426 cFYI(1, ("ppw - page not up to date"));
cb876f45
LT
1427
1428 /*
1429 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1430 *
1431 * A writepage() implementation always needs to do either this,
1432 * or re-dirty the page with "redirty_page_for_writepage()" in
1433 * the case of a failure.
1434 *
1435 * Just unlocking the page will cause the radix tree tag-bits
1436 * to fail to update with the state of the page correctly.
1437 */
fb8c4b14 1438 set_page_writeback(page);
1da177e4
LT
1439 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1440 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1441 unlock_page(page);
cb876f45
LT
1442 end_page_writeback(page);
1443 page_cache_release(page);
1da177e4
LT
1444 FreeXid(xid);
1445 return rc;
1446}
1447
1448static int cifs_commit_write(struct file *file, struct page *page,
1449 unsigned offset, unsigned to)
1450{
1451 int xid;
1452 int rc = 0;
1453 struct inode *inode = page->mapping->host;
1454 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1455 char *page_data;
1456
1457 xid = GetXid();
fb8c4b14 1458 cFYI(1, ("commit write for page %p up to position %lld for %d",
1da177e4 1459 page, position, to));
3677db10 1460 spin_lock(&inode->i_lock);
ad7a2926 1461 if (position > inode->i_size)
1da177e4 1462 i_size_write(inode, position);
ad7a2926 1463
3677db10 1464 spin_unlock(&inode->i_lock);
1da177e4
LT
1465 if (!PageUptodate(page)) {
1466 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1467 /* can not rely on (or let) writepage write this data */
1468 if (to < offset) {
1469 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1470 offset, to));
1471 FreeXid(xid);
1472 return rc;
1473 }
1474 /* this is probably better than directly calling
1475 partialpage_write since in this function the file handle is
1476 known which we might as well leverage */
1477 /* BB check if anything else missing out of ppw
1478 such as updating last write time */
1479 page_data = kmap(page);
1480 rc = cifs_write(file, page_data + offset, to-offset,
1481 &position);
1482 if (rc > 0)
1483 rc = 0;
1484 /* else if (rc < 0) should we set writebehind rc? */
1485 kunmap(page);
fb8c4b14 1486 } else {
1da177e4
LT
1487 set_page_dirty(page);
1488 }
1489
1490 FreeXid(xid);
1491 return rc;
1492}
1493
1494int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1495{
1496 int xid;
1497 int rc = 0;
e6a00296 1498 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1499
1500 xid = GetXid();
1501
fb8c4b14 1502 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1da177e4 1503 dentry->d_name.name, datasync));
50c2f753 1504
cea21805
JL
1505 rc = filemap_write_and_wait(inode->i_mapping);
1506 if (rc == 0) {
1507 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1508 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1509 }
1da177e4
LT
1510 FreeXid(xid);
1511 return rc;
1512}
1513
3978d717 1514/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1515{
1516 struct address_space *mapping;
1517 struct inode *inode;
1518 unsigned long index = page->index;
1519 unsigned int rpages = 0;
1520 int rc = 0;
1521
1522 cFYI(1, ("sync page %p",page));
1523 mapping = page->mapping;
1524 if (!mapping)
1525 return 0;
1526 inode = mapping->host;
1527 if (!inode)
3978d717 1528 return; */
1da177e4 1529
fb8c4b14 1530/* fill in rpages then
1da177e4
LT
1531 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1532
26a21b98 1533/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1da177e4 1534
3978d717 1535#if 0
1da177e4
LT
1536 if (rc < 0)
1537 return rc;
1538 return 0;
3978d717 1539#endif
1da177e4
LT
1540} */
1541
1542/*
1543 * As file closes, flush all cached write data for this inode checking
1544 * for write behind errors.
1545 */
75e1fcc0 1546int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1547{
fb8c4b14 1548 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1549 int rc = 0;
1550
1551 /* Rather than do the steps manually:
1552 lock the inode for writing
1553 loop through pages looking for write behind data (dirty pages)
1554 coalesce into contiguous 16K (or smaller) chunks to write to server
1555 send to server (prefer in parallel)
1556 deal with writebehind errors
1557 unlock inode for writing
1558 filemapfdatawrite appears easier for the time being */
1559
1560 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1561 /* reset wb rc if we were able to write out dirty pages */
1562 if (!rc) {
1563 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1564 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1565 }
50c2f753 1566
fb8c4b14 1567 cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1da177e4
LT
1568
1569 return rc;
1570}
1571
1572ssize_t cifs_user_read(struct file *file, char __user *read_data,
1573 size_t read_size, loff_t *poffset)
1574{
1575 int rc = -EACCES;
1576 unsigned int bytes_read = 0;
1577 unsigned int total_read = 0;
1578 unsigned int current_read_size;
1579 struct cifs_sb_info *cifs_sb;
1580 struct cifsTconInfo *pTcon;
1581 int xid;
1582 struct cifsFileInfo *open_file;
1583 char *smb_read_data;
1584 char __user *current_offset;
1585 struct smb_com_read_rsp *pSMBr;
1586
1587 xid = GetXid();
e6a00296 1588 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1589 pTcon = cifs_sb->tcon;
1590
1591 if (file->private_data == NULL) {
1592 FreeXid(xid);
1593 return -EBADF;
1594 }
1595 open_file = (struct cifsFileInfo *)file->private_data;
1596
ad7a2926 1597 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1da177e4 1598 cFYI(1, ("attempting read on write only file instance"));
ad7a2926 1599
1da177e4
LT
1600 for (total_read = 0, current_offset = read_data;
1601 read_size > total_read;
1602 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1603 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1604 cifs_sb->rsize);
1605 rc = -EAGAIN;
1606 smb_read_data = NULL;
1607 while (rc == -EAGAIN) {
ec637e3f 1608 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1609 if ((open_file->invalidHandle) &&
1da177e4 1610 (!open_file->closePend)) {
4b18f2a9 1611 rc = cifs_reopen_file(file, true);
1da177e4
LT
1612 if (rc != 0)
1613 break;
1614 }
bfa0d75a 1615 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1616 open_file->netfid,
1617 current_read_size, *poffset,
1618 &bytes_read, &smb_read_data,
1619 &buf_type);
1da177e4 1620 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1621 if (smb_read_data) {
93544cc6
SF
1622 if (copy_to_user(current_offset,
1623 smb_read_data +
1624 4 /* RFC1001 length field */ +
1625 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1626 bytes_read))
93544cc6 1627 rc = -EFAULT;
93544cc6 1628
fb8c4b14 1629 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1630 cifs_small_buf_release(smb_read_data);
fb8c4b14 1631 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1632 cifs_buf_release(smb_read_data);
1da177e4
LT
1633 smb_read_data = NULL;
1634 }
1635 }
1636 if (rc || (bytes_read == 0)) {
1637 if (total_read) {
1638 break;
1639 } else {
1640 FreeXid(xid);
1641 return rc;
1642 }
1643 } else {
a4544347 1644 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1645 *poffset += bytes_read;
1646 }
1647 }
1648 FreeXid(xid);
1649 return total_read;
1650}
1651
1652
1653static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1654 loff_t *poffset)
1655{
1656 int rc = -EACCES;
1657 unsigned int bytes_read = 0;
1658 unsigned int total_read;
1659 unsigned int current_read_size;
1660 struct cifs_sb_info *cifs_sb;
1661 struct cifsTconInfo *pTcon;
1662 int xid;
1663 char *current_offset;
1664 struct cifsFileInfo *open_file;
ec637e3f 1665 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1666
1667 xid = GetXid();
e6a00296 1668 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1669 pTcon = cifs_sb->tcon;
1670
1671 if (file->private_data == NULL) {
1672 FreeXid(xid);
1673 return -EBADF;
1674 }
1675 open_file = (struct cifsFileInfo *)file->private_data;
1676
1677 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1678 cFYI(1, ("attempting read on write only file instance"));
1679
fb8c4b14 1680 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1681 read_size > total_read;
1682 total_read += bytes_read, current_offset += bytes_read) {
1683 current_read_size = min_t(const int, read_size - total_read,
1684 cifs_sb->rsize);
f9f5c817
SF
1685 /* For windows me and 9x we do not want to request more
1686 than it negotiated since it will refuse the read then */
fb8c4b14 1687 if ((pTcon->ses) &&
f9f5c817
SF
1688 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1689 current_read_size = min_t(const int, current_read_size,
1690 pTcon->ses->server->maxBuf - 128);
1691 }
1da177e4
LT
1692 rc = -EAGAIN;
1693 while (rc == -EAGAIN) {
fb8c4b14 1694 if ((open_file->invalidHandle) &&
1da177e4 1695 (!open_file->closePend)) {
4b18f2a9 1696 rc = cifs_reopen_file(file, true);
1da177e4
LT
1697 if (rc != 0)
1698 break;
1699 }
bfa0d75a 1700 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1701 open_file->netfid,
1702 current_read_size, *poffset,
1703 &bytes_read, &current_offset,
1704 &buf_type);
1da177e4
LT
1705 }
1706 if (rc || (bytes_read == 0)) {
1707 if (total_read) {
1708 break;
1709 } else {
1710 FreeXid(xid);
1711 return rc;
1712 }
1713 } else {
a4544347 1714 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1715 *poffset += bytes_read;
1716 }
1717 }
1718 FreeXid(xid);
1719 return total_read;
1720}
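/*
 * Condensed sketch of the -EAGAIN retry loop shared by cifs_user_read()
 * and cifs_read() above: reopen an invalidated handle (unless a close is
 * pending) and reissue CIFSSMBRead() until it returns something other
 * than -EAGAIN.  This helper is hypothetical and only restates the
 * pattern already coded inline above.
 */
static int cifs_issue_read(const int xid, struct cifsTconInfo *pTcon,
			   struct file *file, struct cifsFileInfo *open_file,
			   const unsigned int count, loff_t offset,
			   unsigned int *bytes_read, char **buf, int *buf_type)
{
	int rc = -EAGAIN;

	while (rc == -EAGAIN) {
		if (open_file->invalidHandle && !open_file->closePend) {
			rc = cifs_reopen_file(file, true);
			if (rc != 0)
				break;
		}
		rc = CIFSSMBRead(xid, pTcon, open_file->netfid, count, offset,
				 bytes_read, buf, buf_type);
	}
	return rc;
}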
1721
1722int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1723{
e6a00296 1724 struct dentry *dentry = file->f_path.dentry;
1da177e4
LT
1725 int rc, xid;
1726
1727 xid = GetXid();
1728 rc = cifs_revalidate(dentry);
1729 if (rc) {
1730 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1731 FreeXid(xid);
1732 return rc;
1733 }
1734 rc = generic_file_mmap(file, vma);
1735 FreeXid(xid);
1736 return rc;
1737}
1738
1739
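/*
 * Copy one server read reply into the pagecache pages queued on @pages:
 * each page is added to @mapping, filled from @data (the tail of a short
 * final page is zeroed), marked uptodate, unlocked, and batched onto
 * @plru_pvec for LRU insertion.
 */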
fb8c4b14 1740static void cifs_copy_cache_pages(struct address_space *mapping,
1da177e4
LT
1741 struct list_head *pages, int bytes_read, char *data,
1742 struct pagevec *plru_pvec)
1743{
1744 struct page *page;
1745 char *target;
1746
1747 while (bytes_read > 0) {
1748 if (list_empty(pages))
1749 break;
1750
1751 page = list_entry(pages->prev, struct page, lru);
1752 list_del(&page->lru);
1753
1754 if (add_to_page_cache(page, mapping, page->index,
1755 GFP_KERNEL)) {
1756 page_cache_release(page);
1757 cFYI(1, ("Add page cache failed"));
3079ca62
SF
1758 data += PAGE_CACHE_SIZE;
1759 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1760 continue;
1761 }
1762
fb8c4b14 1763 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1764
1765 if (PAGE_CACHE_SIZE > bytes_read) {
1766 memcpy(target, data, bytes_read);
1767 /* zero the tail end of this partial page */
fb8c4b14 1768 memset(target + bytes_read, 0,
1da177e4
LT
1769 PAGE_CACHE_SIZE - bytes_read);
1770 bytes_read = 0;
1771 } else {
1772 memcpy(target, data, PAGE_CACHE_SIZE);
1773 bytes_read -= PAGE_CACHE_SIZE;
1774 }
1775 kunmap_atomic(target, KM_USER0);
1776
1777 flush_dcache_page(page);
1778 SetPageUptodate(page);
1779 unlock_page(page);
1780 if (!pagevec_add(plru_pvec, page))
1781 __pagevec_lru_add(plru_pvec);
1782 data += PAGE_CACHE_SIZE;
1783 }
1784 return;
1785}
1786
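/*
 * Fill a batch of pagecache pages via one or more SMB reads: contiguous
 * pages from @page_list are grouped into a single CIFSSMBRead of up to
 * rsize bytes (rounded down to a whole number of pages), and each reply
 * is fanned out to the pagecache by cifs_copy_cache_pages().
 */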
1787static int cifs_readpages(struct file *file, struct address_space *mapping,
1788 struct list_head *page_list, unsigned num_pages)
1789{
1790 int rc = -EACCES;
1791 int xid;
1792 loff_t offset;
1793 struct page *page;
1794 struct cifs_sb_info *cifs_sb;
1795 struct cifsTconInfo *pTcon;
2c2130e1 1796 unsigned int bytes_read = 0;
fb8c4b14 1797 unsigned int read_size, i;
1da177e4
LT
1798 char *smb_read_data = NULL;
1799 struct smb_com_read_rsp *pSMBr;
1800 struct pagevec lru_pvec;
1801 struct cifsFileInfo *open_file;
ec637e3f 1802 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1803
1804 xid = GetXid();
1805 if (file->private_data == NULL) {
1806 FreeXid(xid);
1807 return -EBADF;
1808 }
1809 open_file = (struct cifsFileInfo *)file->private_data;
e6a00296 1810 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1811 pTcon = cifs_sb->tcon;
bfa0d75a 1812
1da177e4 1813 pagevec_init(&lru_pvec, 0);
90c81e0b 1814 cFYI(DBG2, ("rpages: num pages %d", num_pages));
1da177e4
LT
1815 for (i = 0; i < num_pages; ) {
1816 unsigned contig_pages;
1817 struct page *tmp_page;
1818 unsigned long expected_index;
1819
1820 if (list_empty(page_list))
1821 break;
1822
1823 page = list_entry(page_list->prev, struct page, lru);
1824 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1825
1826 /* count adjacent pages that we will read into */
1827 contig_pages = 0;
fb8c4b14 1828 expected_index =
1da177e4 1829 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 1830 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
1831 if (tmp_page->index == expected_index) {
1832 contig_pages++;
1833 expected_index++;
1834 } else
fb8c4b14 1835 break;
1da177e4
LT
1836 }
1837 if (contig_pages + i > num_pages)
1838 contig_pages = num_pages - i;
1839
1840 /* for reads over a certain size could initiate async
1841 read ahead */
1842
1843 read_size = contig_pages * PAGE_CACHE_SIZE;
1844 /* Read size needs to be in multiples of one page */
1845 read_size = min_t(const unsigned int, read_size,
1846 cifs_sb->rsize & PAGE_CACHE_MASK);
90c81e0b 1847 cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
75865f8c 1848 read_size, contig_pages));
1da177e4
LT
1849 rc = -EAGAIN;
1850 while (rc == -EAGAIN) {
fb8c4b14 1851 if ((open_file->invalidHandle) &&
1da177e4 1852 (!open_file->closePend)) {
4b18f2a9 1853 rc = cifs_reopen_file(file, true);
1da177e4
LT
1854 if (rc != 0)
1855 break;
1856 }
1857
bfa0d75a 1858 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1859 open_file->netfid,
1860 read_size, offset,
1861 &bytes_read, &smb_read_data,
1862 &buf_type);
a9d02ad4 1863 /* BB more RC checks ? */
fb8c4b14 1864 if (rc == -EAGAIN) {
1da177e4 1865 if (smb_read_data) {
fb8c4b14 1866 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1867 cifs_small_buf_release(smb_read_data);
fb8c4b14 1868 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1869 cifs_buf_release(smb_read_data);
1da177e4
LT
1870 smb_read_data = NULL;
1871 }
1872 }
1873 }
1874 if ((rc < 0) || (smb_read_data == NULL)) {
1875 cFYI(1, ("Read error in readpages: %d", rc));
1da177e4
LT
1876 break;
1877 } else if (bytes_read > 0) {
6f88cc2e 1878 task_io_account_read(bytes_read);
1da177e4
LT
1879 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1880 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1881 smb_read_data + 4 /* RFC1001 hdr */ +
1882 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1883
1884 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 1885 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 1886 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
1887 i++; /* account for partial page */
1888
fb8c4b14 1889 /* server copy of file can have smaller size
1da177e4 1890 than client */
fb8c4b14
SF
 1891 /* BB do we need to verify this common case?
1892 this case is ok - if we are at server EOF
1da177e4
LT
1893 we will hit it on next read */
1894
05ac9d4b 1895 /* break; */
1da177e4
LT
1896 }
1897 } else {
 1898 cFYI(1, ("No bytes read (%d) at offset %lld. "
1899 "Cleaning remaining pages from readahead list",
1900 bytes_read, offset));
fb8c4b14 1901 /* BB turn off caching and do new lookup on
1da177e4 1902 file size at server? */
1da177e4
LT
1903 break;
1904 }
1905 if (smb_read_data) {
fb8c4b14 1906 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1907 cifs_small_buf_release(smb_read_data);
fb8c4b14 1908 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1909 cifs_buf_release(smb_read_data);
1da177e4
LT
1910 smb_read_data = NULL;
1911 }
1912 bytes_read = 0;
1913 }
1914
1915 pagevec_lru_add(&lru_pvec);
1916
1917/* need to free smb_read_data buf before exit */
1918 if (smb_read_data) {
fb8c4b14 1919 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 1920 cifs_small_buf_release(smb_read_data);
fb8c4b14 1921 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 1922 cifs_buf_release(smb_read_data);
1da177e4 1923 smb_read_data = NULL;
fb8c4b14 1924 }
1da177e4
LT
1925
1926 FreeXid(xid);
1927 return rc;
1928}
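/*
 * Sketch of the page accounting done after a successful read in
 * cifs_readpages() above: a reply of bytes_read bytes consumes
 * bytes_read >> PAGE_CACHE_SHIFT full pages, plus one more page when the
 * final page was only partially filled.  The helper is hypothetical.
 */
static inline unsigned int cifs_pages_consumed(unsigned int bytes_read)
{
	unsigned int pages = bytes_read >> PAGE_CACHE_SHIFT;

	if ((bytes_read & PAGE_CACHE_MASK) != bytes_read)
		pages++;	/* partial final page */
	return pages;
}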
1929
1930static int cifs_readpage_worker(struct file *file, struct page *page,
1931 loff_t *poffset)
1932{
1933 char *read_data;
1934 int rc;
1935
1936 page_cache_get(page);
1937 read_data = kmap(page);
1938 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 1939
1da177e4 1940 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 1941
1da177e4
LT
1942 if (rc < 0)
1943 goto io_error;
1944 else
fb8c4b14
SF
1945 cFYI(1, ("Bytes read %d", rc));
1946
e6a00296
JJS
1947 file->f_path.dentry->d_inode->i_atime =
1948 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 1949
1da177e4
LT
1950 if (PAGE_CACHE_SIZE > rc)
1951 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1952
1953 flush_dcache_page(page);
1954 SetPageUptodate(page);
1955 rc = 0;
fb8c4b14 1956
1da177e4 1957io_error:
fb8c4b14 1958 kunmap(page);
1da177e4
LT
1959 page_cache_release(page);
1960 return rc;
1961}
1962
1963static int cifs_readpage(struct file *file, struct page *page)
1964{
1965 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1966 int rc = -EACCES;
1967 int xid;
1968
1969 xid = GetXid();
1970
1971 if (file->private_data == NULL) {
1972 FreeXid(xid);
1973 return -EBADF;
1974 }
1975
fb8c4b14 1976 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1da177e4
LT
1977 page, (int)offset, (int)offset));
1978
1979 rc = cifs_readpage_worker(file, page, &offset);
1980
1981 unlock_page(page);
1982
1983 FreeXid(xid);
1984 return rc;
1985}
1986
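/*
 * Return 1 if any still-open file on this inode was opened with write
 * access (O_RDWR or O_WRONLY), 0 otherwise; handles with a close pending
 * are skipped.
 */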
a403a0a3
SF
1987static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
1988{
1989 struct cifsFileInfo *open_file;
1990
1991 read_lock(&GlobalSMBSeslock);
1992 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1993 if (open_file->closePend)
1994 continue;
1995 if (open_file->pfile &&
1996 ((open_file->pfile->f_flags & O_RDWR) ||
1997 (open_file->pfile->f_flags & O_WRONLY))) {
1998 read_unlock(&GlobalSMBSeslock);
1999 return 1;
2000 }
2001 }
2002 read_unlock(&GlobalSMBSeslock);
2003 return 0;
2004}
2005
1da177e4
LT
2006/* We do not want to update the file size from the server for inodes
2007 open for write, to avoid races with writepage extending
2008 the file.  In the future we could consider allowing
fb8c4b14 2009 refreshing the inode only on increases in the file size,
1da177e4
LT
2010 but this is tricky to do without racing with writebehind
2011 page caching in the current Linux kernel design */
4b18f2a9 2012bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2013{
a403a0a3 2014 if (!cifsInode)
4b18f2a9 2015 return true;
50c2f753 2016
a403a0a3
SF
2017 if (is_inode_writable(cifsInode)) {
2018 /* This inode is open for write at least once */
c32a0b68
SF
2019 struct cifs_sb_info *cifs_sb;
2020
c32a0b68 2021 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2022 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2023 /* since no page cache to corrupt on directio
c32a0b68 2024 we can change size safely */
4b18f2a9 2025 return true;
c32a0b68
SF
2026 }
2027
fb8c4b14 2028 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2029 return true;
7ba52631 2030
4b18f2a9 2031 return false;
23e7dd7d 2032 } else
4b18f2a9 2033 return true;
1da177e4
LT
2034}
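/*
 * Hedged usage sketch (the real callers live elsewhere in cifs, e.g. the
 * inode revalidation path): code that has just fetched metadata from the
 * server would consult is_size_safe_to_change() before trusting the
 * server-reported size.  Function and variable names are illustrative.
 */
static void example_apply_server_size(struct inode *inode, __u64 server_eof)
{
	struct cifsInodeInfo *cifs_inode = CIFS_I(inode);

	if (is_size_safe_to_change(cifs_inode, server_eof))
		i_size_write(inode, server_eof);
	/* otherwise keep the locally cached size to avoid racing writeback */
}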
2035
1da177e4
LT
2036static int cifs_prepare_write(struct file *file, struct page *page,
2037 unsigned from, unsigned to)
2038{
2039 int rc = 0;
8a236264
SF
2040 loff_t i_size;
2041 loff_t offset;
2042
fb8c4b14 2043 cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
8a236264
SF
2044 if (PageUptodate(page))
2045 return 0;
2046
2047 /* If we are writing a full page it will be up to date,
2048 no need to read from the server */
2049 if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
2050 SetPageUptodate(page);
2051 return 0;
2052 }
2053
2054 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2055 i_size = i_size_read(page->mapping->host);
2056
2057 if ((offset >= i_size) ||
2058 ((from == 0) && (offset + to) >= i_size)) {
2059 /*
2060 * We don't need to read data beyond the end of the file.
2061 * zero it, and set the page uptodate
2062 */
8803863a 2063 simple_prepare_write(file, page, from, to);
8a236264
SF
2064 SetPageUptodate(page);
2065 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1da177e4 2066 /* might as well read a page, it is fast enough */
8a236264
SF
2067 rc = cifs_readpage_worker(file, page, &offset);
2068 } else {
2069 /* we could try using another file handle if there is one -
2070 but how would we lock it to prevent close of that handle
2071 racing with this read? In any case
2072 this will be written out by commit_write so is fine */
1da177e4
LT
2073 }
2074
fb8c4b14
SF
2075 /* we do not need to pass errors back
2076 e.g. if we do not have read access to the file
8a236264
SF
2077 because cifs_commit_write will do the right thing. -- shaggy */
2078
1da177e4
LT
2079 return 0;
2080}
2081
f5e54d6e 2082const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2083 .readpage = cifs_readpage,
2084 .readpages = cifs_readpages,
2085 .writepage = cifs_writepage,
37c0eb46 2086 .writepages = cifs_writepages,
1da177e4
LT
2087 .prepare_write = cifs_prepare_write,
2088 .commit_write = cifs_commit_write,
2089 .set_page_dirty = __set_page_dirty_nobuffers,
2090 /* .sync_page = cifs_sync_page, */
2091 /* .direct_IO = */
2092};
273d81d6
DK
2093
2094/*
2095 * cifs_readpages requires the server to support a buffer large enough to
2096 * contain the header plus one complete page of data. Otherwise, we need
2097 * to leave cifs_readpages out of the address space operations.
2098 */
f5e54d6e 2099const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2100 .readpage = cifs_readpage,
2101 .writepage = cifs_writepage,
2102 .writepages = cifs_writepages,
2103 .prepare_write = cifs_prepare_write,
2104 .commit_write = cifs_commit_write,
2105 .set_page_dirty = __set_page_dirty_nobuffers,
2106 /* .sync_page = cifs_sync_page, */
2107 /* .direct_IO = */
2108};
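/*
 * Hedged sketch of how the two tables above are typically chosen (the
 * real check lives in the cifs inode setup code, not in this file): when
 * the server's negotiated buffer cannot hold a full page of data plus the
 * SMB header, the smallbuf table without ->readpages is used instead.
 * The function name is illustrative.
 */
static void example_pick_aops(struct inode *inode, struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->tcon->ses->server->maxBuf <
	    PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
	else
		inode->i_data.a_ops = &cifs_addr_ops;
}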