4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <asm/div64.h>
40 #include "cifsproto.h"
41 #include "cifs_unicode.h"
42 #include "cifs_debug.h"
43 #include "cifs_fs_sb.h"
46 static inline int cifs_convert_flags(unsigned int flags)
48 if ((flags & O_ACCMODE) == O_RDONLY)
50 else if ((flags & O_ACCMODE) == O_WRONLY)
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request;
54 it can cause unnecessary access denied errors on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
64 static u32 cifs_posix_convert_flags(unsigned int flags)
68 if ((flags & O_ACCMODE) == O_RDONLY)
69 posix_flags = SMB_O_RDONLY;
70 else if ((flags & O_ACCMODE) == O_WRONLY)
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
76 posix_flags |= SMB_O_CREAT;
78 posix_flags |= SMB_O_EXCL;
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
83 posix_flags |= SMB_O_SYNC;
84 if (flags & O_DIRECTORY)
85 posix_flags |= SMB_O_DIRECTORY;
86 if (flags & O_NOFOLLOW)
87 posix_flags |= SMB_O_NOFOLLOW;
89 posix_flags |= SMB_O_DIRECT;
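/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combinations to an NT create
 * disposition; see the open flag mapping table in cifs_nt_open() below.
 */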
94 static inline int cifs_get_disposition(unsigned int flags)
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
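/*
 * Issue a POSIX create/open (CIFSPOSIXCreate) for full_path and, on
 * success, convert the returned FILE_UNIX_BASIC_INFO into a cifs_fattr,
 * either fetching a new inode via cifs_iget() or refreshing *pinode.
 */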
108 int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
110 __u32 *poplock, __u16 *pnetfid, int xid)
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
118 struct cifs_tcon *tcon;
120 cFYI(1, "posix open %s", full_path);
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
126 tlink = cifs_sb_tlink(cifs_sb);
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
149 goto posix_open_ret; /* caller does not need info */
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
162 cifs_fattr_to_inode(*pinode, &fattr);
171 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
173 __u16 *pnetfid, int xid)
178 int create_options = CREATE_NOT_DIR;
181 desiredAccess = cifs_convert_flags(f_flags);
183 /*********************************************************************
184 * open flag mapping table:
186 * POSIX Flag CIFS Disposition
187 * ---------- ----------------
188 * O_CREAT FILE_OPEN_IF
189 * O_CREAT | O_EXCL FILE_CREATE
190 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
191 * O_TRUNC FILE_OVERWRITE
192 * none of the above FILE_OPEN
194 * Note that there is no direct match for the disposition
195 * FILE_SUPERSEDE (i.e. create whether or not the file exists).
196 * O_CREAT | O_TRUNC is similar, but it truncates the existing
197 * file rather than creating a new file as FILE_SUPERSEDE does
198 * (which uses the attributes / metadata passed in on the open call).
200 *? O_SYNC is a reasonable match to CIFS writethrough flag
201 *? and the read write flags match reasonably. O_LARGEFILE
202 *? is irrelevant because largefile support is always used
203 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
204 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
205 *********************************************************************/
207 disposition = cifs_get_disposition(f_flags);
209 /* BB pass O_SYNC flag through on file attributes .. BB */
211 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
215 if (backup_cred(cifs_sb))
216 create_options |= CREATE_OPEN_BACKUP_INTENT;
218 if (tcon->ses->capabilities & CAP_NT_SMBS)
219 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
220 desiredAccess, create_options, pnetfid, poplock, buf,
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
224 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
225 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
226 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
227 & CIFS_MOUNT_MAP_SPECIAL_CHR);
233 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
236 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
244 struct cifsFileInfo *
245 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
246 struct tcon_link *tlink, __u32 oplock)
248 struct dentry *dentry = file->f_path.dentry;
249 struct inode *inode = dentry->d_inode;
250 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
251 struct cifsFileInfo *pCifsFile;
253 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
254 if (pCifsFile == NULL)
257 pCifsFile->count = 1;
258 pCifsFile->netfid = fileHandle;
259 pCifsFile->pid = current->tgid;
260 pCifsFile->uid = current_fsuid();
261 pCifsFile->dentry = dget(dentry);
262 pCifsFile->f_flags = file->f_flags;
263 pCifsFile->invalidHandle = false;
264 pCifsFile->tlink = cifs_get_tlink(tlink);
265 mutex_init(&pCifsFile->fh_mutex);
266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
268 spin_lock(&cifs_file_list_lock);
269 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
270 /* if readable file instance put first in list*/
271 if (file->f_mode & FMODE_READ)
272 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
274 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
275 spin_unlock(&cifs_file_list_lock);
277 cifs_set_oplock_level(pCifsInode, oplock);
278 pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
280 file->private_data = pCifsFile;
284 static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
287 * Release a reference on the file private data. This may involve closing
288 * the filehandle out on the server. Must be called without holding
289 * cifs_file_list_lock.
291 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
293 struct inode *inode = cifs_file->dentry->d_inode;
294 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
295 struct cifsInodeInfo *cifsi = CIFS_I(inode);
296 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
297 struct cifsLockInfo *li, *tmp;
299 spin_lock(&cifs_file_list_lock);
300 if (--cifs_file->count > 0) {
301 spin_unlock(&cifs_file_list_lock);
305 /* remove it from the lists */
306 list_del(&cifs_file->flist);
307 list_del(&cifs_file->tlist);
309 if (list_empty(&cifsi->openFileList)) {
310 cFYI(1, "closing last open instance for inode %p",
311 cifs_file->dentry->d_inode);
313 /* in strict cache mode we need to invalidate the mapping on the last
314 close because it may cause an error when we open this file
315 again and get at least a level II oplock */
316 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
317 CIFS_I(inode)->invalid_mapping = true;
319 cifs_set_oplock_level(cifsi, 0);
321 spin_unlock(&cifs_file_list_lock);
323 cancel_work_sync(&cifs_file->oplock_break);
325 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
329 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
333 /* Delete any outstanding lock records. We'll lose them when the file is closed anyway. */
336 mutex_lock(&cifsi->lock_mutex);
337 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
338 if (li->netfid != cifs_file->netfid)
340 list_del(&li->llist);
341 cifs_del_lock_waiters(li);
344 mutex_unlock(&cifsi->lock_mutex);
346 cifs_put_tlink(cifs_file->tlink);
347 dput(cifs_file->dentry);
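/*
 * VFS ->open for CIFS: build the path, try a POSIX open when the server
 * advertises Unix extensions and POSIX path operations, otherwise fall
 * back to cifs_nt_open(), then wrap the handle in a cifsFileInfo. When
 * the open actually created the file on a unix_ext mount without going
 * through the POSIX path, the mode is set with CIFSSMBUnixSetFileInfo().
 */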
351 int cifs_open(struct inode *inode, struct file *file)
356 struct cifs_sb_info *cifs_sb;
357 struct cifs_tcon *tcon;
358 struct tcon_link *tlink;
359 struct cifsFileInfo *pCifsFile = NULL;
360 char *full_path = NULL;
361 bool posix_open_ok = false;
366 cifs_sb = CIFS_SB(inode->i_sb);
367 tlink = cifs_sb_tlink(cifs_sb);
370 return PTR_ERR(tlink);
372 tcon = tlink_tcon(tlink);
374 full_path = build_path_from_dentry(file->f_path.dentry);
375 if (full_path == NULL) {
380 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
381 inode, file->f_flags, full_path);
383 if (tcon->ses->server->oplocks)
388 if (!tcon->broken_posix_open && tcon->unix_ext &&
389 (tcon->ses->capabilities & CAP_UNIX) &&
390 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
391 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
392 /* cannot refresh inode info since the size could be stale */
393 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
394 cifs_sb->mnt_file_mode /* ignored */,
395 file->f_flags, &oplock, &netfid, xid);
397 cFYI(1, "posix open succeeded");
398 posix_open_ok = true;
399 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
400 if (tcon->ses->serverNOS)
401 cERROR(1, "server %s of type %s returned"
402 " unexpected error on SMB posix open"
403 ", disabling posix open support."
404 " Check if server update available.",
405 tcon->ses->serverName,
406 tcon->ses->serverNOS);
407 tcon->broken_posix_open = true;
408 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
409 (rc != -EOPNOTSUPP)) /* path not found or net err */
411 /* else fallthrough to retry open the old way on network i/o or DFS errors */
415 if (!posix_open_ok) {
416 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
417 file->f_flags, &oplock, &netfid, xid);
422 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
423 if (pCifsFile == NULL) {
424 CIFSSMBClose(xid, tcon, netfid);
429 cifs_fscache_set_inode_cookie(inode, file);
431 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
432 /* time to set the mode, which we cannot set earlier due to
433 problems creating new read-only files */
434 struct cifs_unix_set_info_args args = {
435 .mode = inode->i_mode,
438 .ctime = NO_CHANGE_64,
439 .atime = NO_CHANGE_64,
440 .mtime = NO_CHANGE_64,
443 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
450 cifs_put_tlink(tlink);
454 /* Try to reacquire byte range locks that were released when the
455    session to the server was lost */
456 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
460 /* BB list all locks open on this file and relock */
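/*
 * Reopen a file handle that was invalidated (e.g. after a reconnect).
 * A POSIX reopen is attempted when the server supports it, otherwise a
 * regular SMB open; when can_flush is set, dirty data is written back
 * and the inode info refreshed before byte-range locks are re-acquired.
 */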
465 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
470 struct cifs_sb_info *cifs_sb;
471 struct cifs_tcon *tcon;
472 struct cifsInodeInfo *pCifsInode;
474 char *full_path = NULL;
476 int disposition = FILE_OPEN;
477 int create_options = CREATE_NOT_DIR;
481 mutex_lock(&pCifsFile->fh_mutex);
482 if (!pCifsFile->invalidHandle) {
483 mutex_unlock(&pCifsFile->fh_mutex);
489 inode = pCifsFile->dentry->d_inode;
490 cifs_sb = CIFS_SB(inode->i_sb);
491 tcon = tlink_tcon(pCifsFile->tlink);
493 /* cannot grab the rename sem here because various ops, including
494 those that already hold the rename sem, can end up causing writepage
495 to get called, and if the server was down that means we end up here
496 and can never tell if the caller already holds the rename_sem */
497 full_path = build_path_from_dentry(pCifsFile->dentry);
498 if (full_path == NULL) {
500 mutex_unlock(&pCifsFile->fh_mutex);
505 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
506 inode, pCifsFile->f_flags, full_path);
508 if (tcon->ses->server->oplocks)
513 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
514 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
515 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
518 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
519 * original open. Must mask them off for a reopen.
521 unsigned int oflags = pCifsFile->f_flags &
522 ~(O_CREAT | O_EXCL | O_TRUNC);
524 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
525 cifs_sb->mnt_file_mode /* ignored */,
526 oflags, &oplock, &netfid, xid);
528 cFYI(1, "posix reopen succeeded");
531 /* fallthrough to retry open the old way on errors; especially
532 in the reconnect path it is important to retry hard */
535 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
537 if (backup_cred(cifs_sb))
538 create_options |= CREATE_OPEN_BACKUP_INTENT;
540 /* Cannot refresh the inode by passing in a file_info buf to be returned
541 by SMBOpen and then calling get_inode_info with the returned buf,
542 since the file might have write-behind data that needs to be flushed
543 and the server's version of the file size can be stale. If we knew for sure
544 that the inode was not dirty locally we could do this */
546 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
547 create_options, &netfid, &oplock, NULL,
548 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
549 CIFS_MOUNT_MAP_SPECIAL_CHR);
551 mutex_unlock(&pCifsFile->fh_mutex);
552 cFYI(1, "cifs_open returned 0x%x", rc);
553 cFYI(1, "oplock: %d", oplock);
554 goto reopen_error_exit;
558 pCifsFile->netfid = netfid;
559 pCifsFile->invalidHandle = false;
560 mutex_unlock(&pCifsFile->fh_mutex);
561 pCifsInode = CIFS_I(inode);
564 rc = filemap_write_and_wait(inode->i_mapping);
565 mapping_set_error(inode->i_mapping, rc);
568 rc = cifs_get_inode_info_unix(&inode,
569 full_path, inode->i_sb, xid);
571 rc = cifs_get_inode_info(&inode,
572 full_path, NULL, inode->i_sb,
574 } /* else we are writing out data to the server already
575 and could deadlock if we tried to flush data, and
576 since we do not know if we have data that would
577 invalidate the current end of file on the server
578 we cannot go to the server to get the new inode info */
581 cifs_set_oplock_level(pCifsInode, oplock);
583 cifs_relock_file(pCifsFile);
591 int cifs_close(struct inode *inode, struct file *file)
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
598 /* return code from the ->release op is always ignored */
602 int cifs_closedir(struct inode *inode, struct file *file)
606 struct cifsFileInfo *pCFileStruct = file->private_data;
609 cFYI(1, "Closedir inode = 0x%p", inode);
614 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
616 cFYI(1, "Freeing private data in close dir");
617 spin_lock(&cifs_file_list_lock);
618 if (!pCFileStruct->srch_inf.endOfSearch &&
619 !pCFileStruct->invalidHandle) {
620 pCFileStruct->invalidHandle = true;
621 spin_unlock(&cifs_file_list_lock);
622 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
623 cFYI(1, "Closing uncompleted readdir with rc %d",
625 /* not much we can do if it fails anyway, ignore rc */
628 spin_unlock(&cifs_file_list_lock);
629 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
631 cFYI(1, "closedir free smb buf in srch struct");
632 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
633 if (pCFileStruct->srch_inf.smallBuf)
634 cifs_small_buf_release(ptmp);
636 cifs_buf_release(ptmp);
638 cifs_put_tlink(pCFileStruct->tlink);
639 kfree(file->private_data);
640 file->private_data = NULL;
642 /* BB can we lock the filestruct while this is going on? */
647 static struct cifsLockInfo *
648 cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
650 struct cifsLockInfo *lock =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
654 lock->offset = offset;
655 lock->length = length;
657 lock->netfid = netfid;
658 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&lock->block_q);
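/* Wake up and unlink every waiter queued on this lock's blist. */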
665 cifs_del_lock_waiters(struct cifsLockInfo *lock)
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
675 __cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock)
679 struct cifsLockInfo *li, *tmp;
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
682 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length)
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
686 ((netfid == li->netfid && current->tgid == li->pid) ||
698 cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
699 struct cifsLockInfo **conf_lock)
701 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
702 lock->type, lock->netfid, conf_lock);
706 * Check if there is another lock that prevents us from setting the lock
707 * (mandatory style). If such a lock exists, update the flock structure with
708 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
709 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
710 * ask the server, or 1 otherwise.
713 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
714 __u8 type, __u16 netfid, struct file_lock *flock)
717 struct cifsLockInfo *conf_lock;
720 mutex_lock(&cinode->lock_mutex);
722 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
725 flock->fl_start = conf_lock->offset;
726 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
727 flock->fl_pid = conf_lock->pid;
728 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
729 flock->fl_type = F_RDLCK;
731 flock->fl_type = F_WRLCK;
732 } else if (!cinode->can_cache_brlcks)
735 flock->fl_type = F_UNLCK;
737 mutex_unlock(&cinode->lock_mutex);
742 cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
744 mutex_lock(&cinode->lock_mutex);
745 list_add_tail(&lock->llist, &cinode->llist);
746 mutex_unlock(&cinode->lock_mutex);
750 * Set the byte-range lock (mandatory style). Returns:
751 * 1) 0, if we set the lock and don't need to ask the server;
752 * 2) 1, if no locks prevent us but we need to ask the server;
753 * 3) -EACCES, if there is a lock that prevents us and wait is false.
756 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
759 struct cifsLockInfo *conf_lock;
765 mutex_lock(&cinode->lock_mutex);
767 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
768 if (!exist && cinode->can_cache_brlcks) {
769 list_add_tail(&lock->llist, &cinode->llist);
770 mutex_unlock(&cinode->lock_mutex);
779 list_add_tail(&lock->blist, &conf_lock->blist);
780 mutex_unlock(&cinode->lock_mutex);
781 rc = wait_event_interruptible(lock->block_q,
782 (lock->blist.prev == &lock->blist) &&
783 (lock->blist.next == &lock->blist));
786 mutex_lock(&cinode->lock_mutex);
787 list_del_init(&lock->blist);
790 mutex_unlock(&cinode->lock_mutex);
795 * Check if there is another lock that prevents us from setting the lock
796 * (posix style). If such a lock exists, update the flock structure with
797 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
798 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
799 * ask the server, or 1 otherwise.
802 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
805 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
806 unsigned char saved_type = flock->fl_type;
808 if ((flock->fl_flags & FL_POSIX) == 0)
811 mutex_lock(&cinode->lock_mutex);
812 posix_test_lock(file, flock);
814 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
815 flock->fl_type = saved_type;
819 mutex_unlock(&cinode->lock_mutex);
824 * Set the byte-range lock (posix style). Returns:
825 * 1) 0, if we set the lock and don't need to ask the server;
826 * 2) 1, if we need to ask the server;
827 * 3) <0, if an error occurs while setting the lock.
830 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
832 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
835 if ((flock->fl_flags & FL_POSIX) == 0)
839 mutex_lock(&cinode->lock_mutex);
840 if (!cinode->can_cache_brlcks) {
841 mutex_unlock(&cinode->lock_mutex);
845 rc = posix_lock_file(file, flock, NULL);
846 mutex_unlock(&cinode->lock_mutex);
847 if (rc == FILE_LOCK_DEFERRED) {
848 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
851 locks_delete_block(flock);
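/*
 * Push all cached byte-range locks for this file to the server, batching
 * up to max_num LOCKING_ANDX_RANGE entries per cifs_lockv() call and
 * making one pass per lock type (exclusive, then shared); clears
 * can_cache_brlcks when done.
 */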
857 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
859 int xid, rc = 0, stored_rc;
860 struct cifsLockInfo *li, *tmp;
861 struct cifs_tcon *tcon;
862 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
863 unsigned int num, max_num;
864 LOCKING_ANDX_RANGE *buf, *cur;
865 int types[] = {LOCKING_ANDX_LARGE_FILES,
866 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
870 tcon = tlink_tcon(cfile->tlink);
872 mutex_lock(&cinode->lock_mutex);
873 if (!cinode->can_cache_brlcks) {
874 mutex_unlock(&cinode->lock_mutex);
879 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
880 sizeof(LOCKING_ANDX_RANGE);
881 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
883 mutex_unlock(&cinode->lock_mutex);
888 for (i = 0; i < 2; i++) {
891 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
892 if (li->type != types[i])
894 cur->Pid = cpu_to_le16(li->pid);
895 cur->LengthLow = cpu_to_le32((u32)li->length);
896 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
897 cur->OffsetLow = cpu_to_le32((u32)li->offset);
898 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
899 if (++num == max_num) {
900 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
901 li->type, 0, num, buf);
911 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
912 types[i], 0, num, buf);
918 cinode->can_cache_brlcks = false;
919 mutex_unlock(&cinode->lock_mutex);
926 /* copied from fs/locks.c with a name change */
927 #define cifs_for_each_lock(inode, lockp) \
928 for (lockp = &inode->i_flock; *lockp != NULL; \
929 lockp = &(*lockp)->fl_next)
931 struct lock_to_push {
932 struct list_head llist;
941 cifs_push_posix_locks(struct cifsFileInfo *cfile)
943 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
944 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
945 struct file_lock *flock, **before;
946 unsigned int count = 0, i = 0;
947 int rc = 0, xid, type;
948 struct list_head locks_to_send, *el;
949 struct lock_to_push *lck, *tmp;
954 mutex_lock(&cinode->lock_mutex);
955 if (!cinode->can_cache_brlcks) {
956 mutex_unlock(&cinode->lock_mutex);
962 cifs_for_each_lock(cfile->dentry->d_inode, before) {
963 if ((*before)->fl_flags & FL_POSIX)
968 INIT_LIST_HEAD(&locks_to_send);
971 * Allocating count locks is enough because no FL_POSIX locks can be
972 * added to the list while we are holding cinode->lock_mutex, which
973 * protects the locking operations of this inode.
975 for (; i < count; i++) {
976 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
981 list_add_tail(&lck->llist, &locks_to_send);
984 el = locks_to_send.next;
986 cifs_for_each_lock(cfile->dentry->d_inode, before) {
988 if ((flock->fl_flags & FL_POSIX) == 0)
990 if (el == &locks_to_send) {
992 * The list ended. We don't have enough allocated
993 * structures - something is really wrong.
995 cERROR(1, "Can't push all brlocks!");
998 length = 1 + flock->fl_end - flock->fl_start;
999 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1003 lck = list_entry(el, struct lock_to_push, llist);
1004 lck->pid = flock->fl_pid;
1005 lck->netfid = cfile->netfid;
1006 lck->length = length;
1008 lck->offset = flock->fl_start;
1013 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1014 struct file_lock tmp_lock;
1017 tmp_lock.fl_start = lck->offset;
1018 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1019 0, lck->length, &tmp_lock,
1023 list_del(&lck->llist);
1028 cinode->can_cache_brlcks = false;
1029 mutex_unlock(&cinode->lock_mutex);
1034 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1035 list_del(&lck->llist);
1042 cifs_push_locks(struct cifsFileInfo *cfile)
1044 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1045 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1047 if ((tcon->ses->capabilities & CAP_UNIX) &&
1048 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1049 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1050 return cifs_push_posix_locks(cfile);
1052 return cifs_push_mandatory_locks(cfile);
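/*
 * Decode a struct file_lock into the CIFS lock type bits and the
 * lock/unlock/wait indications used by cifs_getlk() and cifs_setlk().
 */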
1056 cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
1059 if (flock->fl_flags & FL_POSIX)
1061 if (flock->fl_flags & FL_FLOCK)
1063 if (flock->fl_flags & FL_SLEEP) {
1064 cFYI(1, "Blocking lock");
1067 if (flock->fl_flags & FL_ACCESS)
1068 cFYI(1, "Process suspended by mandatory locking - "
1069 "not implemented yet");
1070 if (flock->fl_flags & FL_LEASE)
1071 cFYI(1, "Lease on file - not implemented yet");
1072 if (flock->fl_flags &
1073 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
1074 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1076 *type = LOCKING_ANDX_LARGE_FILES;
1077 if (flock->fl_type == F_WRLCK) {
1078 cFYI(1, "F_WRLCK ");
1080 } else if (flock->fl_type == F_UNLCK) {
1083 /* Check if unlock includes more than one lock range */
1084 } else if (flock->fl_type == F_RDLCK) {
1086 *type |= LOCKING_ANDX_SHARED_LOCK;
1088 } else if (flock->fl_type == F_EXLCK) {
1091 } else if (flock->fl_type == F_SHLCK) {
1093 *type |= LOCKING_ANDX_SHARED_LOCK;
1096 cFYI(1, "Unknown type of lock");
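/*
 * Handle a lock test (F_GETLK): use CIFSSMBPosixLock() in "get" mode when
 * POSIX locking applies, otherwise consult the cached lock list via
 * cifs_lock_test() and, if needed, probe the server with a lock followed
 * by an unlock of the same range.
 */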
1100 cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1101 bool wait_flag, bool posix_lck, int xid)
1104 __u64 length = 1 + flock->fl_end - flock->fl_start;
1105 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1106 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1107 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1108 __u16 netfid = cfile->netfid;
1111 int posix_lock_type;
1113 rc = cifs_posix_lock_test(file, flock);
1117 if (type & LOCKING_ANDX_SHARED_LOCK)
1118 posix_lock_type = CIFS_RDLCK;
1120 posix_lock_type = CIFS_WRLCK;
1121 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1122 1 /* get */, length, flock,
1123 posix_lock_type, wait_flag);
1127 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1132 /* BB we could chain these into one lock request BB */
1133 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1134 flock->fl_start, 0, 1, type, 0, 0);
1136 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1137 length, flock->fl_start, 1, 0,
1139 flock->fl_type = F_UNLCK;
1141 cERROR(1, "Error unlocking previously locked "
1142 "range %d during test of lock", rc);
1146 if (type & LOCKING_ANDX_SHARED_LOCK) {
1147 flock->fl_type = F_WRLCK;
1151 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1152 flock->fl_start, 0, 1,
1153 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1155 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1156 length, flock->fl_start, 1, 0,
1157 type | LOCKING_ANDX_SHARED_LOCK,
1159 flock->fl_type = F_RDLCK;
1161 cERROR(1, "Error unlocking previously locked "
1162 "range %d during test of lock", rc);
1164 flock->fl_type = F_WRLCK;
1170 cifs_move_llist(struct list_head *source, struct list_head *dest)
1172 struct list_head *li, *tmp;
1173 list_for_each_safe(li, tmp, source)
1174 list_move(li, dest);
1178 cifs_free_llist(struct list_head *llist)
1180 struct cifsLockInfo *li, *tmp;
1181 list_for_each_entry_safe(li, tmp, llist, llist) {
1182 cifs_del_lock_waiters(li);
1183 list_del(&li->llist);
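/*
 * Drop cached locks that fall inside the unlock range. Locks already sent
 * to the server are batched into LOCKING_ANDX_RANGE arrays and released
 * with cifs_lockv(); if that request fails, the entries are moved back
 * onto the inode's lock list.
 */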
1189 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1191 int rc = 0, stored_rc;
1192 int types[] = {LOCKING_ANDX_LARGE_FILES,
1193 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1195 unsigned int max_num, num;
1196 LOCKING_ANDX_RANGE *buf, *cur;
1197 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1198 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1199 struct cifsLockInfo *li, *tmp;
1200 __u64 length = 1 + flock->fl_end - flock->fl_start;
1201 struct list_head tmp_llist;
1203 INIT_LIST_HEAD(&tmp_llist);
1205 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1206 sizeof(LOCKING_ANDX_RANGE);
1207 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1211 mutex_lock(&cinode->lock_mutex);
1212 for (i = 0; i < 2; i++) {
1215 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1216 if (flock->fl_start > li->offset ||
1217 (flock->fl_start + length) <
1218 (li->offset + li->length))
1220 if (current->tgid != li->pid)
1222 if (cfile->netfid != li->netfid)
1224 if (types[i] != li->type)
1226 if (!cinode->can_cache_brlcks) {
1227 cur->Pid = cpu_to_le16(li->pid);
1228 cur->LengthLow = cpu_to_le32((u32)li->length);
1230 cpu_to_le32((u32)(li->length>>32));
1231 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1233 cpu_to_le32((u32)(li->offset>>32));
1235 * We need to save the lock here so we can
1236 * add it back to the inode list if the unlock
1237 * range request fails on the server.
1239 list_move(&li->llist, &tmp_llist);
1240 if (++num == max_num) {
1241 stored_rc = cifs_lockv(xid, tcon,
1247 * We failed on the unlock range
1248 * request - add all locks from the
1249 * tmp list to the head of the inode's list.
1252 cifs_move_llist(&tmp_llist,
1257 * The unlock range request
1258 * succeeded - free the tmp list.
1260 cifs_free_llist(&tmp_llist);
1267 * We can cache brlock requests - simply remove
1268 * the lock from the inode list.
1270 list_del(&li->llist);
1271 cifs_del_lock_waiters(li);
1276 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1277 types[i], num, 0, buf);
1279 cifs_move_llist(&tmp_llist, &cinode->llist);
1282 cifs_free_llist(&tmp_llist);
1286 mutex_unlock(&cinode->lock_mutex);
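/*
 * Set or clear a byte-range lock. On POSIX-capable mounts this goes
 * through cifs_posix_lock_set() and CIFSSMBPosixLock(); otherwise locks
 * are added via cifs_lock_add_if()/CIFSSMBLock() and unlocks are handled
 * by cifs_unlock_range().
 */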
1292 cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1293 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1296 __u64 length = 1 + flock->fl_end - flock->fl_start;
1297 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1298 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1299 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
1300 __u16 netfid = cfile->netfid;
1303 int posix_lock_type;
1305 rc = cifs_posix_lock_set(file, flock);
1309 if (type & LOCKING_ANDX_SHARED_LOCK)
1310 posix_lock_type = CIFS_RDLCK;
1312 posix_lock_type = CIFS_WRLCK;
1315 posix_lock_type = CIFS_UNLCK;
1317 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1318 0 /* set */, length, flock,
1319 posix_lock_type, wait_flag);
1324 struct cifsLockInfo *lock;
1326 lock = cifs_lock_init(flock->fl_start, length, type, netfid);
1330 rc = cifs_lock_add_if(cinode, lock, wait_flag);
1336 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1337 flock->fl_start, 0, 1, type, wait_flag, 0);
1343 cifs_lock_add(cinode, lock);
1345 rc = cifs_unlock_range(cfile, flock, xid);
1348 if (flock->fl_flags & FL_POSIX)
1349 posix_lock_file_wait(file, flock);
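/*
 * VFS ->lock entry point: decode the file_lock, decide whether POSIX
 * byte-range locking can be used on this mount, and dispatch F_GETLK
 * requests to cifs_getlk() and lock/unlock requests to cifs_setlk().
 */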
1353 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1356 int lock = 0, unlock = 0;
1357 bool wait_flag = false;
1358 bool posix_lck = false;
1359 struct cifs_sb_info *cifs_sb;
1360 struct cifs_tcon *tcon;
1361 struct cifsInodeInfo *cinode;
1362 struct cifsFileInfo *cfile;
1369 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1370 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1371 flock->fl_start, flock->fl_end);
1373 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1375 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1376 cfile = (struct cifsFileInfo *)file->private_data;
1377 tcon = tlink_tcon(cfile->tlink);
1378 netfid = cfile->netfid;
1379 cinode = CIFS_I(file->f_path.dentry->d_inode);
1381 if ((tcon->ses->capabilities & CAP_UNIX) &&
1382 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1383 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1386 * BB add code here to normalize offset and length to account for
1387 * negative length, which we cannot accept over the wire.
1389 if (IS_GETLK(cmd)) {
1390 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1395 if (!lock && !unlock) {
1397 * if no lock or unlock then nothing to do since we do not know what it is
1404 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1410 /* update the file size (if needed) after a write */
1412 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1413 unsigned int bytes_written)
1415 loff_t end_of_write = offset + bytes_written;
1417 if (end_of_write > cifsi->server_eof)
1418 cifsi->server_eof = end_of_write;
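/*
 * Synchronous write helper: send write_size bytes from write_data at
 * *poffset with CIFSSMBWrite2() in chunks of at most wsize, reopening an
 * invalidated handle and retrying on -EAGAIN; updates the cached server
 * EOF and i_size on success.
 */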
1421 static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
1422 const char *write_data, size_t write_size,
1426 unsigned int bytes_written = 0;
1427 unsigned int total_written;
1428 struct cifs_sb_info *cifs_sb;
1429 struct cifs_tcon *pTcon;
1431 struct dentry *dentry = open_file->dentry;
1432 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1433 struct cifs_io_parms io_parms;
1435 cifs_sb = CIFS_SB(dentry->d_sb);
1437 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1438 *poffset, dentry->d_name.name);
1440 pTcon = tlink_tcon(open_file->tlink);
1444 for (total_written = 0; write_size > total_written;
1445 total_written += bytes_written) {
1447 while (rc == -EAGAIN) {
1451 if (open_file->invalidHandle) {
1452 /* we could deadlock if we called
1453 filemap_fdatawait from here so tell
1454 reopen_file not to flush data to the server now */
1456 rc = cifs_reopen_file(open_file, false);
1461 len = min((size_t)cifs_sb->wsize,
1462 write_size - total_written);
1463 /* iov[0] is reserved for smb header */
1464 iov[1].iov_base = (char *)write_data + total_written;
1465 iov[1].iov_len = len;
1466 io_parms.netfid = open_file->netfid;
1468 io_parms.tcon = pTcon;
1469 io_parms.offset = *poffset;
1470 io_parms.length = len;
1471 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1474 if (rc || (bytes_written == 0)) {
1482 cifs_update_eof(cifsi, *poffset, bytes_written);
1483 *poffset += bytes_written;
1487 cifs_stats_bytes_written(pTcon, total_written);
1489 if (total_written > 0) {
1490 spin_lock(&dentry->d_inode->i_lock);
1491 if (*poffset > dentry->d_inode->i_size)
1492 i_size_write(dentry->d_inode, *poffset);
1493 spin_unlock(&dentry->d_inode->i_lock);
1495 mark_inode_dirty_sync(dentry->d_inode);
1497 return total_written;
1500 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1503 struct cifsFileInfo *open_file = NULL;
1504 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1506 /* only filter by fsuid on multiuser mounts */
1507 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1510 spin_lock(&cifs_file_list_lock);
1511 /* we could simply get the first_list_entry since write-only entries
1512 are always at the end of the list but since the first entry might
1513 have a close pending, we go through the whole list */
1514 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1515 if (fsuid_only && open_file->uid != current_fsuid())
1517 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1518 if (!open_file->invalidHandle) {
1519 /* found a good file */
1520 /* lock it so it will not be closed on us */
1521 cifsFileInfo_get(open_file);
1522 spin_unlock(&cifs_file_list_lock);
1524 } /* else might as well continue, and look for
1525 another, or simply have the caller reopen it
1526 again rather than trying to fix this handle */
1527 } else /* write only file */
1528 break; /* write only files are last so must be done */
1530 spin_unlock(&cifs_file_list_lock);
1534 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1537 struct cifsFileInfo *open_file, *inv_file = NULL;
1538 struct cifs_sb_info *cifs_sb;
1539 bool any_available = false;
1541 unsigned int refind = 0;
1543 /* Having a null inode here (because mapping->host was set to zero by
1544 the VFS or MM) should not happen, but we had reports of an oops (due to
1545 it being zero) during stress test cases, so we need to check for it */
1547 if (cifs_inode == NULL) {
1548 cERROR(1, "Null inode passed to cifs_writeable_file");
1553 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1555 /* only filter by fsuid on multiuser mounts */
1556 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1559 spin_lock(&cifs_file_list_lock);
1561 if (refind > MAX_REOPEN_ATT) {
1562 spin_unlock(&cifs_file_list_lock);
1565 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1566 if (!any_available && open_file->pid != current->tgid)
1568 if (fsuid_only && open_file->uid != current_fsuid())
1570 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1571 if (!open_file->invalidHandle) {
1572 /* found a good writable file */
1573 cifsFileInfo_get(open_file);
1574 spin_unlock(&cifs_file_list_lock);
1578 inv_file = open_file;
1582 /* couldn't find a usable FH with the same pid, try any available */
1583 if (!any_available) {
1584 any_available = true;
1585 goto refind_writable;
1589 any_available = false;
1590 cifsFileInfo_get(inv_file);
1593 spin_unlock(&cifs_file_list_lock);
1596 rc = cifs_reopen_file(inv_file, false);
1600 spin_lock(&cifs_file_list_lock);
1601 list_move_tail(&inv_file->flist,
1602 &cifs_inode->openFileList);
1603 spin_unlock(&cifs_file_list_lock);
1604 cifsFileInfo_put(inv_file);
1605 spin_lock(&cifs_file_list_lock);
1607 goto refind_writable;
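/*
 * Write the byte range [from, to) of a single page back to the server,
 * using any writable open handle for the inode (find_writable_file())
 * and the synchronous cifs_write() helper.
 */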
1614 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1616 struct address_space *mapping = page->mapping;
1617 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1620 int bytes_written = 0;
1621 struct inode *inode;
1622 struct cifsFileInfo *open_file;
1624 if (!mapping || !mapping->host)
1627 inode = page->mapping->host;
1629 offset += (loff_t)from;
1630 write_data = kmap(page);
1633 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1638 /* racing with truncate? */
1639 if (offset > mapping->host->i_size) {
1641 return 0; /* don't care */
1644 /* check to make sure that we are not extending the file */
1645 if (mapping->host->i_size - offset < (loff_t)to)
1646 to = (unsigned)(mapping->host->i_size - offset);
1648 open_file = find_writable_file(CIFS_I(mapping->host), false);
1650 bytes_written = cifs_write(open_file, open_file->pid,
1651 write_data, to - from, &offset);
1652 cifsFileInfo_put(open_file);
1653 /* Does mm or vfs already set times? */
1654 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1655 if ((bytes_written > 0) && (offset))
1657 else if (bytes_written < 0)
1660 cFYI(1, "No writeable filehandles for inode");
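/*
 * ->writepages for CIFS: gather runs of contiguous dirty pages (up to
 * wsize bytes per batch) into a cifs_writedata and send each batch with
 * cifs_async_writev(); falls back to generic_writepages() when wsize is
 * smaller than the page cache size.
 */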
1668 static int cifs_writepages(struct address_space *mapping,
1669 struct writeback_control *wbc)
1671 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1672 bool done = false, scanned = false, range_whole = false;
1674 struct cifs_writedata *wdata;
1679 * If wsize is smaller than the page cache size, default to writing
1680 * one page at a time via cifs_writepage
1682 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1683 return generic_writepages(mapping, wbc);
1685 if (wbc->range_cyclic) {
1686 index = mapping->writeback_index; /* Start from prev offset */
1689 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1690 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1691 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1696 while (!done && index <= end) {
1697 unsigned int i, nr_pages, found_pages;
1698 pgoff_t next = 0, tofind;
1699 struct page **pages;
1701 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1704 wdata = cifs_writedata_alloc((unsigned int)tofind);
1711 * find_get_pages_tag seems to return a max of 256 on each
1712 * iteration, so we must call it several times in order to
1713 * fill the array or the wsize is effectively limited to
1714 * 256 * PAGE_CACHE_SIZE.
1717 pages = wdata->pages;
1719 nr_pages = find_get_pages_tag(mapping, &index,
1720 PAGECACHE_TAG_DIRTY,
1722 found_pages += nr_pages;
1725 } while (nr_pages && tofind && index <= end);
1727 if (found_pages == 0) {
1728 kref_put(&wdata->refcount, cifs_writedata_release);
1733 for (i = 0; i < found_pages; i++) {
1734 page = wdata->pages[i];
1736 * At this point we hold neither mapping->tree_lock nor
1737 * lock on the page itself: the page may be truncated or
1738 * invalidated (changing page->mapping to NULL), or even
1739 * swizzled back from swapper_space to tmpfs file mapping
1745 else if (!trylock_page(page))
1748 if (unlikely(page->mapping != mapping)) {
1753 if (!wbc->range_cyclic && page->index > end) {
1759 if (next && (page->index != next)) {
1760 /* Not next consecutive page */
1765 if (wbc->sync_mode != WB_SYNC_NONE)
1766 wait_on_page_writeback(page);
1768 if (PageWriteback(page) ||
1769 !clear_page_dirty_for_io(page)) {
1775 * This actually clears the dirty bit in the radix tree.
1776 * See cifs_writepage() for more commentary.
1778 set_page_writeback(page);
1780 if (page_offset(page) >= mapping->host->i_size) {
1783 end_page_writeback(page);
1787 wdata->pages[i] = page;
1788 next = page->index + 1;
1792 /* reset index to refind any pages skipped */
1794 index = wdata->pages[0]->index + 1;
1796 /* put any pages we aren't going to use */
1797 for (i = nr_pages; i < found_pages; i++) {
1798 page_cache_release(wdata->pages[i]);
1799 wdata->pages[i] = NULL;
1802 /* nothing to write? */
1803 if (nr_pages == 0) {
1804 kref_put(&wdata->refcount, cifs_writedata_release);
1808 wdata->sync_mode = wbc->sync_mode;
1809 wdata->nr_pages = nr_pages;
1810 wdata->offset = page_offset(wdata->pages[0]);
1813 if (wdata->cfile != NULL)
1814 cifsFileInfo_put(wdata->cfile);
1815 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1817 if (!wdata->cfile) {
1818 cERROR(1, "No writable handles for inode");
1822 rc = cifs_async_writev(wdata);
1823 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1825 for (i = 0; i < nr_pages; ++i)
1826 unlock_page(wdata->pages[i]);
1828 /* send failure -- clean up the mess */
1830 for (i = 0; i < nr_pages; ++i) {
1832 redirty_page_for_writepage(wbc,
1835 SetPageError(wdata->pages[i]);
1836 end_page_writeback(wdata->pages[i]);
1837 page_cache_release(wdata->pages[i]);
1840 mapping_set_error(mapping, rc);
1842 kref_put(&wdata->refcount, cifs_writedata_release);
1844 wbc->nr_to_write -= nr_pages;
1845 if (wbc->nr_to_write <= 0)
1851 if (!scanned && !done) {
1853 * We hit the last page and there is more work to be done: wrap
1854 * back to the start of the file
1861 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1862 mapping->writeback_index = index;
1868 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1874 /* BB add check for wbc flags */
1875 page_cache_get(page);
1876 if (!PageUptodate(page))
1877 cFYI(1, "ppw - page not up to date");
1880 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1882 * A writepage() implementation always needs to do either this,
1883 * or re-dirty the page with "redirty_page_for_writepage()" in
1884 * the case of a failure.
1886 * Just unlocking the page will cause the radix tree tag-bits
1887 * to fail to update with the state of the page correctly.
1889 set_page_writeback(page);
1891 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1892 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1894 else if (rc == -EAGAIN)
1895 redirty_page_for_writepage(wbc, page);
1899 SetPageUptodate(page);
1900 end_page_writeback(page);
1901 page_cache_release(page);
1906 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1908 int rc = cifs_writepage_locked(page, wbc);
1913 static int cifs_write_end(struct file *file, struct address_space *mapping,
1914 loff_t pos, unsigned len, unsigned copied,
1915 struct page *page, void *fsdata)
1918 struct inode *inode = mapping->host;
1919 struct cifsFileInfo *cfile = file->private_data;
1920 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1923 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1926 pid = current->tgid;
1928 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1931 if (PageChecked(page)) {
1933 SetPageUptodate(page);
1934 ClearPageChecked(page);
1935 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1936 SetPageUptodate(page);
1938 if (!PageUptodate(page)) {
1940 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1944 /* this is probably better than directly calling
1945 partialpage_write since in this function the file handle is
1946 known, which we might as well leverage */
1947 /* BB check if anything else is missing out of ppw,
1948 such as updating the last write time */
1949 page_data = kmap(page);
1950 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
1951 /* if (rc < 0) should we set writebehind rc? */
1958 set_page_dirty(page);
1962 spin_lock(&inode->i_lock);
1963 if (pos > inode->i_size)
1964 i_size_write(inode, pos);
1965 spin_unlock(&inode->i_lock);
1969 page_cache_release(page);
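/*
 * Strict cache mode fsync: write back and wait on dirty pages, invalidate
 * the mapping if clientCanCacheRead is not set, then send an SMB Flush to
 * the server unless CIFS_MOUNT_NOSSYNC is set.
 */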
1974 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1979 struct cifs_tcon *tcon;
1980 struct cifsFileInfo *smbfile = file->private_data;
1981 struct inode *inode = file->f_path.dentry->d_inode;
1982 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1984 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1987 mutex_lock(&inode->i_mutex);
1991 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1992 file->f_path.dentry->d_name.name, datasync);
1994 if (!CIFS_I(inode)->clientCanCacheRead) {
1995 rc = cifs_invalidate_mapping(inode);
1997 cFYI(1, "rc: %d during invalidate phase", rc);
1998 rc = 0; /* don't care about it in fsync */
2002 tcon = tlink_tcon(smbfile->tlink);
2003 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2004 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2007 mutex_unlock(&inode->i_mutex);
2011 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2015 struct cifs_tcon *tcon;
2016 struct cifsFileInfo *smbfile = file->private_data;
2017 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2018 struct inode *inode = file->f_mapping->host;
2020 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2023 mutex_lock(&inode->i_mutex);
2027 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2028 file->f_path.dentry->d_name.name, datasync);
2030 tcon = tlink_tcon(smbfile->tlink);
2031 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2032 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2035 mutex_unlock(&inode->i_mutex);
2040 * As the file closes, flush all cached write data for this inode, checking
2041 * for write-behind errors.
2043 int cifs_flush(struct file *file, fl_owner_t id)
2045 struct inode *inode = file->f_path.dentry->d_inode;
2048 if (file->f_mode & FMODE_WRITE)
2049 rc = filemap_write_and_wait(inode->i_mapping);
2051 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
2057 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2062 for (i = 0; i < num_pages; i++) {
2063 pages[i] = alloc_page(__GFP_HIGHMEM);
2066 * save number of pages we have already allocated and
2067 * return with ENOMEM error
2078 for (i = 0; i < num_pages; i++)
2084 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2089 clen = min_t(const size_t, len, wsize);
2090 num_pages = clen / PAGE_CACHE_SIZE;
2091 if (clen % PAGE_CACHE_SIZE)
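/*
 * Uncached write path: copy the user iovec data into temporary pages,
 * map them into a kvec array and send them to the server with
 * CIFSSMBWrite2() in wsize-sized chunks, updating the cached EOF and
 * i_size on success.
 */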
2101 cifs_iovec_write(struct file *file, const struct iovec *iov,
2102 unsigned long nr_segs, loff_t *poffset)
2104 unsigned int written;
2105 unsigned long num_pages, npages, i;
2106 size_t copied, len, cur_len;
2107 ssize_t total_written = 0;
2108 struct kvec *to_send;
2109 struct page **pages;
2111 struct inode *inode;
2112 struct cifsFileInfo *open_file;
2113 struct cifs_tcon *pTcon;
2114 struct cifs_sb_info *cifs_sb;
2115 struct cifs_io_parms io_parms;
2119 len = iov_length(iov, nr_segs);
2123 rc = generic_write_checks(file, poffset, &len, 0);
2127 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2128 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2130 pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
2134 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2140 rc = cifs_write_allocate_pages(pages, num_pages);
2148 open_file = file->private_data;
2150 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2151 pid = open_file->pid;
2153 pid = current->tgid;
2155 pTcon = tlink_tcon(open_file->tlink);
2156 inode = file->f_path.dentry->d_inode;
2158 iov_iter_init(&it, iov, nr_segs, len, 0);
2162 size_t save_len = cur_len;
2163 for (i = 0; i < npages; i++) {
2164 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2165 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2168 iov_iter_advance(&it, copied);
2169 to_send[i+1].iov_base = kmap(pages[i]);
2170 to_send[i+1].iov_len = copied;
2173 cur_len = save_len - cur_len;
2176 if (open_file->invalidHandle) {
2177 rc = cifs_reopen_file(open_file, false);
2181 io_parms.netfid = open_file->netfid;
2183 io_parms.tcon = pTcon;
2184 io_parms.offset = *poffset;
2185 io_parms.length = cur_len;
2186 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2188 } while (rc == -EAGAIN);
2190 for (i = 0; i < npages; i++)
2195 total_written += written;
2196 cifs_update_eof(CIFS_I(inode), *poffset, written);
2197 *poffset += written;
2198 } else if (rc < 0) {
2204 /* get length and number of kvecs of the next write */
2205 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2208 if (total_written > 0) {
2209 spin_lock(&inode->i_lock);
2210 if (*poffset > inode->i_size)
2211 i_size_write(inode, *poffset);
2212 spin_unlock(&inode->i_lock);
2215 cifs_stats_bytes_written(pTcon, total_written);
2216 mark_inode_dirty_sync(inode);
2218 for (i = 0; i < num_pages; i++)
2223 return total_written;
2226 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
2227 unsigned long nr_segs, loff_t pos)
2230 struct inode *inode;
2232 inode = iocb->ki_filp->f_path.dentry->d_inode;
2235 * BB - optimize this path when signing is disabled. We can drop this
2236 * extra memory-to-memory copying and use iovec buffers for constructing the write request.
2240 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2242 CIFS_I(inode)->invalid_mapping = true;
2249 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2250 unsigned long nr_segs, loff_t pos)
2252 struct inode *inode;
2254 inode = iocb->ki_filp->f_path.dentry->d_inode;
2256 if (CIFS_I(inode)->clientCanCacheAll)
2257 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2261 * In strict cache mode we need to write the data to the server exactly
2262 * from pos to pos+len-1 rather than flush all affected pages,
2263 * because it may cause an error with mandatory locks on these pages but
2264 * not on the region from pos to pos+len-1.
2266 return cifs_user_writev(iocb, iov, nr_segs, pos);
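/*
 * Uncached read path: issue CIFSSMBRead() in rsize-sized chunks and copy
 * the returned data straight into the user iovec with memcpy_toiovecend().
 */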
2270 cifs_iovec_read(struct file *file, const struct iovec *iov,
2271 unsigned long nr_segs, loff_t *poffset)
2276 unsigned int bytes_read = 0;
2277 size_t len, cur_len;
2279 struct cifs_sb_info *cifs_sb;
2280 struct cifs_tcon *pTcon;
2281 struct cifsFileInfo *open_file;
2282 struct smb_com_read_rsp *pSMBr;
2283 struct cifs_io_parms io_parms;
2291 len = iov_length(iov, nr_segs);
2296 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2298 /* FIXME: set up handlers for larger reads and/or convert to async */
2299 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2301 open_file = file->private_data;
2302 pTcon = tlink_tcon(open_file->tlink);
2304 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2305 pid = open_file->pid;
2307 pid = current->tgid;
2309 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2310 cFYI(1, "attempting read on write only file instance");
2312 for (total_read = 0; total_read < len; total_read += bytes_read) {
2313 cur_len = min_t(const size_t, len - total_read, rsize);
2317 while (rc == -EAGAIN) {
2318 int buf_type = CIFS_NO_BUFFER;
2319 if (open_file->invalidHandle) {
2320 rc = cifs_reopen_file(open_file, true);
2324 io_parms.netfid = open_file->netfid;
2326 io_parms.tcon = pTcon;
2327 io_parms.offset = *poffset;
2328 io_parms.length = cur_len;
2329 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2330 &read_data, &buf_type);
2331 pSMBr = (struct smb_com_read_rsp *)read_data;
2333 char *data_offset = read_data + 4 +
2334 le16_to_cpu(pSMBr->DataOffset);
2335 if (memcpy_toiovecend(iov, data_offset,
2336 iov_offset, bytes_read))
2338 if (buf_type == CIFS_SMALL_BUFFER)
2339 cifs_small_buf_release(read_data);
2340 else if (buf_type == CIFS_LARGE_BUFFER)
2341 cifs_buf_release(read_data);
2343 iov_offset += bytes_read;
2347 if (rc || (bytes_read == 0)) {
2355 cifs_stats_bytes_read(pTcon, bytes_read);
2356 *poffset += bytes_read;
2364 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2365 unsigned long nr_segs, loff_t pos)
2369 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2376 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2377 unsigned long nr_segs, loff_t pos)
2379 struct inode *inode;
2381 inode = iocb->ki_filp->f_path.dentry->d_inode;
2383 if (CIFS_I(inode)->clientCanCacheRead)
2384 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2387 * In strict cache mode we need to read from the server all the time
2388 * if we don't have a level II oplock because the server can delay mtime
2389 * change - so we can't make a decision about inode invalidation.
2390 * And we can also fail with page reading if there are mandatory locks
2391 * on pages affected by this read but not on the region from pos to pos+len-1.
2395 return cifs_user_readv(iocb, iov, nr_segs, pos);
2398 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
2402 unsigned int bytes_read = 0;
2403 unsigned int total_read;
2404 unsigned int current_read_size;
2406 struct cifs_sb_info *cifs_sb;
2407 struct cifs_tcon *pTcon;
2409 char *current_offset;
2410 struct cifsFileInfo *open_file;
2411 struct cifs_io_parms io_parms;
2412 int buf_type = CIFS_NO_BUFFER;
2416 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2418 /* FIXME: set up handlers for larger reads and/or convert to async */
2419 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2421 if (file->private_data == NULL) {
2426 open_file = file->private_data;
2427 pTcon = tlink_tcon(open_file->tlink);
2429 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2430 pid = open_file->pid;
2432 pid = current->tgid;
2434 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2435 cFYI(1, "attempting read on write only file instance");
2437 for (total_read = 0, current_offset = read_data;
2438 read_size > total_read;
2439 total_read += bytes_read, current_offset += bytes_read) {
2440 current_read_size = min_t(uint, read_size - total_read, rsize);
2442 /* For Windows ME and 9x we do not want to request more
2443 than it negotiated, since it will refuse the read then */
2445 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
2446 current_read_size = min_t(uint, current_read_size,
2450 while (rc == -EAGAIN) {
2451 if (open_file->invalidHandle) {
2452 rc = cifs_reopen_file(open_file, true);
2456 io_parms.netfid = open_file->netfid;
2458 io_parms.tcon = pTcon;
2459 io_parms.offset = *poffset;
2460 io_parms.length = current_read_size;
2461 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2462 &current_offset, &buf_type);
2464 if (rc || (bytes_read == 0)) {
2472 cifs_stats_bytes_read(pTcon, total_read);
2473 *poffset += bytes_read;
2481 * If the page is mmap'ed into a process' page tables, then we need to make
2482 * sure that it doesn't change while being written back.
2485 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2487 struct page *page = vmf->page;
2490 return VM_FAULT_LOCKED;
2493 static struct vm_operations_struct cifs_file_vm_ops = {
2494 .fault = filemap_fault,
2495 .page_mkwrite = cifs_page_mkwrite,
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc)
			return rc;
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
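
/*
 * Default mmap entry point: revalidate the file, then fall through to
 * generic_file_mmap() and install cifs_file_vm_ops.
 */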
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
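
/*
 * ->readpages: pull contiguous pages off the VFS read-ahead list in
 * rsize-sized batches and issue an asynchronous read for each batch.
 */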
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (rdata == NULL) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
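
/*
 * Fill a single page, trying fscache first and falling back to a
 * synchronous read from the server via cifs_read().
 */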
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
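
/* ->readpage: thin wrapper around cifs_readpage_worker() */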
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
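
/* Return 1 if any open handle on this inode was opened for writing. */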
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
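
/*
 * ->write_begin: grab the page that a buffered write is about to modify
 * and, when only part of it will be overwritten, bring it up to date
 * first (or mark it PG_checked when the read can be skipped).
 */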
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
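
/* ->releasepage: defer to fscache unless the page still has private data */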
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
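
/*
 * ->launder_page: synchronously write back a dirty page before it is
 * invalidated, then drop any fscache copy.
 */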
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
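
/*
 * Oplock break worker: flush dirty data, drop cached data when the read
 * oplock is being lost, push cached byte-range locks to the server, and
 * finally acknowledge the break unless it was cancelled.
 */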
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
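
/*
 * Address space operations for regular files. The smallbuf variant below
 * omits ->readpages; the choice between the two tables is made when the
 * inode is initialized, based on whether the server's negotiated buffer
 * can hold a header plus a full page.
 */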
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};