[CIFS] Warn on requesting default security (ntlm) on mount
[pandora-kernel.git] / fs / cifs / file.c
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
36 #include "cifsfs.h"
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "fscache.h"
44
45 static inline int cifs_convert_flags(unsigned int flags)
46 {
47         if ((flags & O_ACCMODE) == O_RDONLY)
48                 return GENERIC_READ;
49         else if ((flags & O_ACCMODE) == O_WRONLY)
50                 return GENERIC_WRITE;
51         else if ((flags & O_ACCMODE) == O_RDWR) {
52                 /* GENERIC_ALL is too much permission to request
53                    can cause unnecessary access denied on create */
54                 /* return GENERIC_ALL; */
55                 return (GENERIC_READ | GENERIC_WRITE);
56         }
57
58         return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59                 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60                 FILE_READ_DATA);
61 }
62
63 static u32 cifs_posix_convert_flags(unsigned int flags)
64 {
65         u32 posix_flags = 0;
66
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 posix_flags = SMB_O_RDONLY;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 posix_flags = SMB_O_WRONLY;
71         else if ((flags & O_ACCMODE) == O_RDWR)
72                 posix_flags = SMB_O_RDWR;
73
74         if (flags & O_CREAT)
75                 posix_flags |= SMB_O_CREAT;
76         if (flags & O_EXCL)
77                 posix_flags |= SMB_O_EXCL;
78         if (flags & O_TRUNC)
79                 posix_flags |= SMB_O_TRUNC;
80         /* be safe and imply O_SYNC for O_DSYNC */
81         if (flags & O_DSYNC)
82                 posix_flags |= SMB_O_SYNC;
83         if (flags & O_DIRECTORY)
84                 posix_flags |= SMB_O_DIRECTORY;
85         if (flags & O_NOFOLLOW)
86                 posix_flags |= SMB_O_NOFOLLOW;
87         if (flags & O_DIRECT)
88                 posix_flags |= SMB_O_DIRECT;
89
90         return posix_flags;
91 }
92
93 static inline int cifs_get_disposition(unsigned int flags)
94 {
95         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96                 return FILE_CREATE;
97         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98                 return FILE_OVERWRITE_IF;
99         else if ((flags & O_CREAT) == O_CREAT)
100                 return FILE_OPEN_IF;
101         else if ((flags & O_TRUNC) == O_TRUNC)
102                 return FILE_OVERWRITE;
103         else
104                 return FILE_OPEN;
105 }
106
/*
 * Open @full_path on the server using the SMB POSIX create/open call
 * (only valid against servers advertising POSIX path operations).
 *
 * @full_path: path relative to the share root
 * @pinode:    in/out inode pointer; may be NULL if the caller does not
 *             need inode info.  If *pinode is NULL a new inode is
 *             instantiated from the attributes the server returned,
 *             otherwise the existing inode is refreshed from them.
 * @sb:        superblock of this mount
 * @mode:      create mode; masked with the caller's umask below
 * @f_flags:   open(2)-style flags, converted to SMB_O_* for the wire
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle for the new open
 * @xid:       transaction id used for debug tracing/accounting
 *
 * Returns 0 on success, negative errno on failure.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	/* grab a reference on the tree connection for this superblock */
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no file attributes */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the fresh attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
168
/*
 * Open @full_path with a traditional (non-POSIX) SMB open.  Uses the
 * NT-style CIFSSMBOpen when the session supports NT SMBs, otherwise
 * falls back to SMBLegacyOpen.  On success the inode metadata is
 * refreshed from the server.  Returns 0 or a negative errno; on
 * success *pnetfid holds the server file handle and *poplock the
 * granted oplock level.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* scratch buffer for the file info returned by the open call */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		/* pre-NT (legacy) server dialect */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh inode metadata from what the server told us */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
238
/*
 * Allocate and initialize the per-open cifsFileInfo for @file, holding
 * server handle @fileHandle.  Takes a reference on the dentry and on
 * @tlink, links the new entry onto the tcon's open-file list and the
 * inode's open-file list (under cifs_file_list_lock), records the
 * granted @oplock level, and stores the result in file->private_data.
 *
 * Returns the new cifsFileInfo (initial refcount 1), or NULL on
 * allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;	/* dropped by cifsFileInfo_put() */
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
279
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops, the entry is unlinked from the tcon
 * and inode open-file lists, the handle is closed on the server (unless
 * the session needs reconnect or the handle is already invalid), any
 * stored byte-range lock records are discarded, and the tlink and
 * dentry references taken in cifs_new_fileinfo() are dropped.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other references remain; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause an error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* close on the server; the SMB call must not run under the lock */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
339
/*
 * VFS ->open() for regular files.  Tries a POSIX open first when the
 * server advertises POSIX path operations (and the tcon has not been
 * flagged broken for it), falling back to a traditional NT/legacy open
 * via cifs_nt_open().  On success a cifsFileInfo is attached to
 * file->private_data and the fscache cookie is set up.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims POSIX support but rejects the call:
			   remember that so we never retry on this tcon */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* undo the server-side open; we cannot track the handle */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
442
/*
 * Try to reacquire byte range locks that were released when the
 * session to the server was lost.  Currently a stub that always
 * reports success.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
453
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * session reconnect).  Serialized against other users of the handle
 * via fh_mutex; returns immediately with 0 if the handle is already
 * valid.  Tries a POSIX reopen first where supported, then falls back
 * to a plain NT open.  When @can_flush is true, dirty pages are
 * written back and the inode metadata refreshed after a successful
 * reopen; callers in the writeback path pass false to avoid deadlock.
 *
 * Returns 0 on success or a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else reopened it while we waited for the mutex */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
575
576 int cifs_close(struct inode *inode, struct file *file)
577 {
578         if (file->private_data != NULL) {
579                 cifsFileInfo_put(file->private_data);
580                 file->private_data = NULL;
581         }
582
583         /* return code from the ->release op is always ignored */
584         return 0;
585 }
586
/*
 * VFS ->release() for directories: tear down the search state attached
 * to @file.  If a readdir search is still in progress on the server,
 * close it with CIFSFindClose (failures are logged and ignored), then
 * release any retained network response buffer, drop the tlink
 * reference, and free the private data.
 *
 * Always returns 0 (close failures are best-effort).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid before dropping the lock so nobody
			   else tries to use the handle while we close it */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* release via the pool matching the allocation size */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
631
632 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
633                                 __u64 offset, __u8 lockType)
634 {
635         struct cifsLockInfo *li =
636                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
637         if (li == NULL)
638                 return -ENOMEM;
639         li->offset = offset;
640         li->length = len;
641         li->type = lockType;
642         mutex_lock(&fid->lock_mutex);
643         list_add(&li->llist, &fid->llist);
644         mutex_unlock(&fid->lock_mutex);
645         return 0;
646 }
647
648 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
649 {
650         int rc, xid;
651         __u32 numLock = 0;
652         __u32 numUnlock = 0;
653         __u64 length;
654         bool wait_flag = false;
655         struct cifs_sb_info *cifs_sb;
656         struct cifsTconInfo *tcon;
657         __u16 netfid;
658         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
659         bool posix_locking = 0;
660
661         length = 1 + pfLock->fl_end - pfLock->fl_start;
662         rc = -EACCES;
663         xid = GetXid();
664
665         cFYI(1, "Lock parm: 0x%x flockflags: "
666                  "0x%x flocktype: 0x%x start: %lld end: %lld",
667                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
668                 pfLock->fl_end);
669
670         if (pfLock->fl_flags & FL_POSIX)
671                 cFYI(1, "Posix");
672         if (pfLock->fl_flags & FL_FLOCK)
673                 cFYI(1, "Flock");
674         if (pfLock->fl_flags & FL_SLEEP) {
675                 cFYI(1, "Blocking lock");
676                 wait_flag = true;
677         }
678         if (pfLock->fl_flags & FL_ACCESS)
679                 cFYI(1, "Process suspended by mandatory locking - "
680                          "not implemented yet");
681         if (pfLock->fl_flags & FL_LEASE)
682                 cFYI(1, "Lease on file - not implemented yet");
683         if (pfLock->fl_flags &
684             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
685                 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
686
687         if (pfLock->fl_type == F_WRLCK) {
688                 cFYI(1, "F_WRLCK ");
689                 numLock = 1;
690         } else if (pfLock->fl_type == F_UNLCK) {
691                 cFYI(1, "F_UNLCK");
692                 numUnlock = 1;
693                 /* Check if unlock includes more than
694                 one lock range */
695         } else if (pfLock->fl_type == F_RDLCK) {
696                 cFYI(1, "F_RDLCK");
697                 lockType |= LOCKING_ANDX_SHARED_LOCK;
698                 numLock = 1;
699         } else if (pfLock->fl_type == F_EXLCK) {
700                 cFYI(1, "F_EXLCK");
701                 numLock = 1;
702         } else if (pfLock->fl_type == F_SHLCK) {
703                 cFYI(1, "F_SHLCK");
704                 lockType |= LOCKING_ANDX_SHARED_LOCK;
705                 numLock = 1;
706         } else
707                 cFYI(1, "Unknown type of lock");
708
709         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
710         tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
711         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
712
713         if ((tcon->ses->capabilities & CAP_UNIX) &&
714             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
715             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
716                 posix_locking = 1;
717         /* BB add code here to normalize offset and length to
718         account for negative length which we can not accept over the
719         wire */
720         if (IS_GETLK(cmd)) {
721                 if (posix_locking) {
722                         int posix_lock_type;
723                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
724                                 posix_lock_type = CIFS_RDLCK;
725                         else
726                                 posix_lock_type = CIFS_WRLCK;
727                         rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
728                                         length, pfLock,
729                                         posix_lock_type, wait_flag);
730                         FreeXid(xid);
731                         return rc;
732                 }
733
734                 /* BB we could chain these into one lock request BB */
735                 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
736                                  0, 1, lockType, 0 /* wait flag */, 0);
737                 if (rc == 0) {
738                         rc = CIFSSMBLock(xid, tcon, netfid, length,
739                                          pfLock->fl_start, 1 /* numUnlock */ ,
740                                          0 /* numLock */ , lockType,
741                                          0 /* wait flag */, 0);
742                         pfLock->fl_type = F_UNLCK;
743                         if (rc != 0)
744                                 cERROR(1, "Error unlocking previously locked "
745                                            "range %d during test of lock", rc);
746                         rc = 0;
747
748                 } else {
749                         /* if rc == ERR_SHARING_VIOLATION ? */
750                         rc = 0;
751
752                         if (lockType & LOCKING_ANDX_SHARED_LOCK) {
753                                 pfLock->fl_type = F_WRLCK;
754                         } else {
755                                 rc = CIFSSMBLock(xid, tcon, netfid, length,
756                                         pfLock->fl_start, 0, 1,
757                                         lockType | LOCKING_ANDX_SHARED_LOCK,
758                                         0 /* wait flag */, 0);
759                                 if (rc == 0) {
760                                         rc = CIFSSMBLock(xid, tcon, netfid,
761                                                 length, pfLock->fl_start, 1, 0,
762                                                 lockType |
763                                                 LOCKING_ANDX_SHARED_LOCK,
764                                                 0 /* wait flag */, 0);
765                                         pfLock->fl_type = F_RDLCK;
766                                         if (rc != 0)
767                                                 cERROR(1, "Error unlocking "
768                                                 "previously locked range %d "
769                                                 "during test of lock", rc);
770                                         rc = 0;
771                                 } else {
772                                         pfLock->fl_type = F_WRLCK;
773                                         rc = 0;
774                                 }
775                         }
776                 }
777
778                 FreeXid(xid);
779                 return rc;
780         }
781
782         if (!numLock && !numUnlock) {
783                 /* if no lock or unlock then nothing
784                 to do since we do not know what it is */
785                 FreeXid(xid);
786                 return -EOPNOTSUPP;
787         }
788
789         if (posix_locking) {
790                 int posix_lock_type;
791                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
792                         posix_lock_type = CIFS_RDLCK;
793                 else
794                         posix_lock_type = CIFS_WRLCK;
795
796                 if (numUnlock == 1)
797                         posix_lock_type = CIFS_UNLCK;
798
799                 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
800                                       length, pfLock,
801                                       posix_lock_type, wait_flag);
802         } else {
803                 struct cifsFileInfo *fid = file->private_data;
804
805                 if (numLock) {
806                         rc = CIFSSMBLock(xid, tcon, netfid, length,
807                                          pfLock->fl_start, 0, numLock, lockType,
808                                          wait_flag, 0);
809
810                         if (rc == 0) {
811                                 /* For Windows locks we must store them. */
812                                 rc = store_file_lock(fid, length,
813                                                 pfLock->fl_start, lockType);
814                         }
815                 } else if (numUnlock) {
816                         /* For each stored lock that this unlock overlaps
817                            completely, unlock it. */
818                         int stored_rc = 0;
819                         struct cifsLockInfo *li, *tmp;
820
821                         rc = 0;
822                         mutex_lock(&fid->lock_mutex);
823                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
824                                 if (pfLock->fl_start <= li->offset &&
825                                                 (pfLock->fl_start + length) >=
826                                                 (li->offset + li->length)) {
827                                         stored_rc = CIFSSMBLock(xid, tcon,
828                                                         netfid, li->length,
829                                                         li->offset, 1, 0,
830                                                         li->type, false, 0);
831                                         if (stored_rc)
832                                                 rc = stored_rc;
833                                         else {
834                                                 list_del(&li->llist);
835                                                 kfree(li);
836                                         }
837                                 }
838                         }
839                         mutex_unlock(&fid->lock_mutex);
840                 }
841         }
842
843         if (pfLock->fl_flags & FL_POSIX)
844                 posix_lock_file_wait(file, pfLock);
845         FreeXid(xid);
846         return rc;
847 }
848
849 /* update the file size (if needed) after a write */
850 void
851 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
852                       unsigned int bytes_written)
853 {
854         loff_t end_of_write = offset + bytes_written;
855
856         if (end_of_write > cifsi->server_eof)
857                 cifsi->server_eof = end_of_write;
858 }
859
/*
 * Write @write_size bytes straight from the userspace buffer
 * @write_data to the server, starting at *@poffset, in chunks of at
 * most wsize bytes.  Advances *@poffset, the cached server EOF and
 * i_size as data goes out.  Returns the number of bytes written, or a
 * negative errno if nothing at all could be written.
 */
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		/* retry each chunk while the transport reports -EAGAIN */
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* report partial progress if any, else the error */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

/* Do not update local mtime - server will set its actual value on write
 *	inode->i_ctime = inode->i_mtime =
 *		current_fs_time(inode->i_sb);*/
	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}
	mark_inode_dirty_sync(inode);

	FreeXid(xid);
	return total_written;
}
948
/*
 * Write @write_size bytes from the kernel buffer @write_data to the
 * server through @open_file, starting at *@poffset.  Loops in chunks of
 * at most wsize bytes, retrying while the transport returns -EAGAIN and
 * reopening an invalidated handle first.  On success advances
 * *@poffset, updates the cached server EOF and i_size, and returns the
 * number of bytes written; a negative errno is returned only when
 * nothing at all was written.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			/* take the kvec-based CIFSSMBWrite2 path when
			   sign_zero_copy is set or the server has
			   signing disabled; otherwise fall back to the
			   copying CIFSSMBWrite */
			if (sign_zero_copy || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, 0);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* report partial progress if any, else the error */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
1035
1036 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1037                                         bool fsuid_only)
1038 {
1039         struct cifsFileInfo *open_file = NULL;
1040         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1041
1042         /* only filter by fsuid on multiuser mounts */
1043         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1044                 fsuid_only = false;
1045
1046         spin_lock(&cifs_file_list_lock);
1047         /* we could simply get the first_list_entry since write-only entries
1048            are always at the end of the list but since the first entry might
1049            have a close pending, we go through the whole list */
1050         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1051                 if (fsuid_only && open_file->uid != current_fsuid())
1052                         continue;
1053                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1054                         if (!open_file->invalidHandle) {
1055                                 /* found a good file */
1056                                 /* lock it so it will not be closed on us */
1057                                 cifsFileInfo_get(open_file);
1058                                 spin_unlock(&cifs_file_list_lock);
1059                                 return open_file;
1060                         } /* else might as well continue, and look for
1061                              another, or simply have the caller reopen it
1062                              again rather than trying to fix this handle */
1063                 } else /* write only file */
1064                         break; /* write only files are last so must be done */
1065         }
1066         spin_unlock(&cifs_file_list_lock);
1067         return NULL;
1068 }
1069
/*
 * Find an open handle on this inode usable for writing.  The first pass
 * only considers handles opened by the current thread group (and, on
 * multiuser mounts with @fsuid_only, by the current fsuid); a second
 * pass accepts any writable handle.  Invalidated handles are reopened
 * if possible.  The returned file carries an extra reference which the
 * caller must drop with cifsFileInfo_put().  Returns NULL when no
 * writable handle can be found or reopened.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			/* take a reference before possibly dropping the lock */
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1140
1141 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1142 {
1143         struct address_space *mapping = page->mapping;
1144         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1145         char *write_data;
1146         int rc = -EFAULT;
1147         int bytes_written = 0;
1148         struct inode *inode;
1149         struct cifsFileInfo *open_file;
1150
1151         if (!mapping || !mapping->host)
1152                 return -EFAULT;
1153
1154         inode = page->mapping->host;
1155
1156         offset += (loff_t)from;
1157         write_data = kmap(page);
1158         write_data += from;
1159
1160         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1161                 kunmap(page);
1162                 return -EIO;
1163         }
1164
1165         /* racing with truncate? */
1166         if (offset > mapping->host->i_size) {
1167                 kunmap(page);
1168                 return 0; /* don't care */
1169         }
1170
1171         /* check to make sure that we are not extending the file */
1172         if (mapping->host->i_size - offset < (loff_t)to)
1173                 to = (unsigned)(mapping->host->i_size - offset);
1174
1175         open_file = find_writable_file(CIFS_I(mapping->host), false);
1176         if (open_file) {
1177                 bytes_written = cifs_write(open_file, write_data,
1178                                            to - from, &offset);
1179                 cifsFileInfo_put(open_file);
1180                 /* Does mm or vfs already set times? */
1181                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1182                 if ((bytes_written > 0) && (offset))
1183                         rc = 0;
1184                 else if (bytes_written < 0)
1185                         rc = bytes_written;
1186         } else {
1187                 cFYI(1, "No writeable filehandles for inode");
1188                 rc = -EIO;
1189         }
1190
1191         kunmap(page);
1192         return rc;
1193 }
1194
/*
 * ->writepages for CIFS: gather runs of consecutive dirty pages into a
 * kvec array (iov[0] is reserved for the SMB header) and push each
 * batch to the server with a single CIFSSMBWrite2 call.  Falls back to
 * generic_writepages() (one page at a time via cifs_writepage) when
 * wsize is smaller than a page, the iov cannot be allocated, no
 * writable handle exists, or the server uses signing while
 * sign_zero_copy is unset.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller that the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * if there's no open file, then this is likely to fail too,
	 * but it'll at least handle the return. Maybe it should be
	 * a BUG() instead?
	 */
	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (!open_file) {
		kfree(iov);
		return generic_writepages(mapping, wbc);
	}

	/* the zero-copy batched path cannot be used with signing enabled
	   unless sign_zero_copy overrides that */
	tcon = tlink_tcon(open_file->tlink);
	if (!sign_zero_copy && tcon->ses->server->secMode &
			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		cifsFileInfo_put(open_file);
		kfree(iov);
		return generic_writepages(mapping, wbc);
	}
	cifsFileInfo_put(open_file);

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			/* block only on the first page of a batch; never
			   block mid-batch */
			if (first < 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this?  pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
retry_write:
			open_file = find_writable_file(CIFS_I(mapping->host),
							false);
			if (!open_file) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   0);
				cifsFileInfo_put(open_file);
			}

			cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);

			/*
			 * For now, treat a short write as if nothing got
			 * written. A zero length write however indicates
			 * ENOSPC or EFBIG. We have no way to know which
			 * though, so call it ENOSPC for now. EFBIG would
			 * get translated to AS_EIO anyway.
			 *
			 * FIXME: make it take into account the data that did
			 *        get written
			 */
			if (rc == 0) {
				if (bytes_written == 0)
					rc = -ENOSPC;
				else if (bytes_written < bytes_to_write)
					rc = -EAGAIN;
			}

			/* retry on data-integrity flush */
			if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
				goto retry_write;

			/* fix the stats and EOF */
			if (bytes_written > 0) {
				cifs_stats_bytes_written(tcon, bytes_written);
				cifs_update_eof(cifsi, offset, bytes_written);
			}

			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* on retryable write error, redirty page */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc, page);
				else if (rc != 0)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}

			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
			else
				rc = 0;

			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		} else
			/* Need to re-find the pages we skipped */
			index = pvec.pages[0]->index + 1;

		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}
1442
/*
 * ->writepage for CIFS: push one dirty page to the server synchronously
 * via cifs_partialpagewrite().  Returns 0 on success or a negative
 * errno.
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
1473
/*
 * ->write_end for CIFS: commit data that was copied into @page.  If the
 * page is (or becomes) uptodate it is just marked dirty for later
 * writeback; otherwise only the @copied bytes are written through to
 * the server immediately with cifs_write().  Extends i_size when the
 * write went past the old end of file.  Returns the number of bytes
 * committed, or a negative errno.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	/* NOTE(review): PageChecked appears to flag pages set up by
	   write_begin without being read in — confirm against
	   cifs_write_begin */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file->private_data, page_data + offset,
				copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1527
1528 int cifs_strict_fsync(struct file *file, int datasync)
1529 {
1530         int xid;
1531         int rc = 0;
1532         struct cifsTconInfo *tcon;
1533         struct cifsFileInfo *smbfile = file->private_data;
1534         struct inode *inode = file->f_path.dentry->d_inode;
1535         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1536
1537         xid = GetXid();
1538
1539         cFYI(1, "Sync file - name: %s datasync: 0x%x",
1540                 file->f_path.dentry->d_name.name, datasync);
1541
1542         if (!CIFS_I(inode)->clientCanCacheRead)
1543                 cifs_invalidate_mapping(inode);
1544
1545         tcon = tlink_tcon(smbfile->tlink);
1546         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1547                 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1548
1549         FreeXid(xid);
1550         return rc;
1551 }
1552
1553 int cifs_fsync(struct file *file, int datasync)
1554 {
1555         int xid;
1556         int rc = 0;
1557         struct cifsTconInfo *tcon;
1558         struct cifsFileInfo *smbfile = file->private_data;
1559         struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1560
1561         xid = GetXid();
1562
1563         cFYI(1, "Sync file - name: %s datasync: 0x%x",
1564                 file->f_path.dentry->d_name.name, datasync);
1565
1566         tcon = tlink_tcon(smbfile->tlink);
1567         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1568                 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1569
1570         FreeXid(xid);
1571         return rc;
1572 }
1573
1574 /*
1575  * As file closes, flush all cached write data for this inode checking
1576  * for write behind errors.
1577  */
1578 int cifs_flush(struct file *file, fl_owner_t id)
1579 {
1580         struct inode *inode = file->f_path.dentry->d_inode;
1581         int rc = 0;
1582
1583         if (file->f_mode & FMODE_WRITE)
1584                 rc = filemap_write_and_wait(inode->i_mapping);
1585
1586         cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1587
1588         return rc;
1589 }
1590
1591 static int
1592 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1593 {
1594         int rc = 0;
1595         unsigned long i;
1596
1597         for (i = 0; i < num_pages; i++) {
1598                 pages[i] = alloc_page(__GFP_HIGHMEM);
1599                 if (!pages[i]) {
1600                         /*
1601                          * save number of pages we have already allocated and
1602                          * return with ENOMEM error
1603                          */
1604                         num_pages = i;
1605                         rc = -ENOMEM;
1606                         goto error;
1607                 }
1608         }
1609
1610         return rc;
1611
1612 error:
1613         for (i = 0; i < num_pages; i++)
1614                 put_page(pages[i]);
1615         return rc;
1616 }
1617
1618 static inline
1619 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1620 {
1621         size_t num_pages;
1622         size_t clen;
1623
1624         clen = min_t(const size_t, len, wsize);
1625         num_pages = clen / PAGE_CACHE_SIZE;
1626         if (clen % PAGE_CACHE_SIZE)
1627                 num_pages++;
1628
1629         if (cur_len)
1630                 *cur_len = clen;
1631
1632         return num_pages;
1633 }
1634
1635 static ssize_t
1636 cifs_iovec_write(struct file *file, const struct iovec *iov,
1637                  unsigned long nr_segs, loff_t *poffset)
1638 {
1639         unsigned int written;
1640         unsigned long num_pages, npages, i;
1641         size_t copied, len, cur_len;
1642         ssize_t total_written = 0;
1643         struct kvec *to_send;
1644         struct page **pages;
1645         struct iov_iter it;
1646         struct inode *inode;
1647         struct cifsFileInfo *open_file;
1648         struct cifsTconInfo *pTcon;
1649         struct cifs_sb_info *cifs_sb;
1650         int xid, rc;
1651
1652         len = iov_length(iov, nr_segs);
1653         if (!len)
1654                 return 0;
1655
1656         rc = generic_write_checks(file, poffset, &len, 0);
1657         if (rc)
1658                 return rc;
1659
1660         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1661         num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1662
1663         pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1664         if (!pages)
1665                 return -ENOMEM;
1666
1667         to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1668         if (!to_send) {
1669                 kfree(pages);
1670                 return -ENOMEM;
1671         }
1672
1673         rc = cifs_write_allocate_pages(pages, num_pages);
1674         if (rc) {
1675                 kfree(pages);
1676                 kfree(to_send);
1677                 return rc;
1678         }
1679
1680         xid = GetXid();
1681         open_file = file->private_data;
1682         pTcon = tlink_tcon(open_file->tlink);
1683         inode = file->f_path.dentry->d_inode;
1684
1685         iov_iter_init(&it, iov, nr_segs, len, 0);
1686         npages = num_pages;
1687
1688         do {
1689                 size_t save_len = cur_len;
1690                 for (i = 0; i < npages; i++) {
1691                         copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1692                         copied = iov_iter_copy_from_user(pages[i], &it, 0,
1693                                                          copied);
1694                         cur_len -= copied;
1695                         iov_iter_advance(&it, copied);
1696                         to_send[i+1].iov_base = kmap(pages[i]);
1697                         to_send[i+1].iov_len = copied;
1698                 }
1699
1700                 cur_len = save_len - cur_len;
1701
1702                 do {
1703                         if (open_file->invalidHandle) {
1704                                 rc = cifs_reopen_file(open_file, false);
1705                                 if (rc != 0)
1706                                         break;
1707                         }
1708                         rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
1709                                            cur_len, *poffset, &written,
1710                                            to_send, npages, 0);
1711                 } while (rc == -EAGAIN);
1712
1713                 for (i = 0; i < npages; i++)
1714                         kunmap(pages[i]);
1715
1716                 if (written) {
1717                         len -= written;
1718                         total_written += written;
1719                         cifs_update_eof(CIFS_I(inode), *poffset, written);
1720                         *poffset += written;
1721                 } else if (rc < 0) {
1722                         if (!total_written)
1723                                 total_written = rc;
1724                         break;
1725                 }
1726
1727                 /* get length and number of kvecs of the next write */
1728                 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1729         } while (len > 0);
1730
1731         if (total_written > 0) {
1732                 spin_lock(&inode->i_lock);
1733                 if (*poffset > inode->i_size)
1734                         i_size_write(inode, *poffset);
1735                 spin_unlock(&inode->i_lock);
1736         }
1737
1738         cifs_stats_bytes_written(pTcon, total_written);
1739         mark_inode_dirty_sync(inode);
1740
1741         for (i = 0; i < num_pages; i++)
1742                 put_page(pages[i]);
1743         kfree(to_send);
1744         kfree(pages);
1745         FreeXid(xid);
1746         return total_written;
1747 }
1748
1749 static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
1750                                 unsigned long nr_segs, loff_t pos)
1751 {
1752         ssize_t written;
1753         struct inode *inode;
1754
1755         inode = iocb->ki_filp->f_path.dentry->d_inode;
1756
1757         /*
1758          * BB - optimize the way when signing is disabled. We can drop this
1759          * extra memory-to-memory copying and use iovec buffers for constructing
1760          * write request.
1761          */
1762
1763         written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1764         if (written > 0) {
1765                 CIFS_I(inode)->invalid_mapping = true;
1766                 iocb->ki_pos = pos;
1767         }
1768
1769         return written;
1770 }
1771
1772 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1773                            unsigned long nr_segs, loff_t pos)
1774 {
1775         struct inode *inode;
1776
1777         inode = iocb->ki_filp->f_path.dentry->d_inode;
1778
1779         if (CIFS_I(inode)->clientCanCacheAll)
1780                 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1781
1782         /*
1783          * In strict cache mode we need to write the data to the server exactly
1784          * from the pos to pos+len-1 rather than flush all affected pages
1785          * because it may cause a error with mandatory locks on these pages but
1786          * not on the region from pos to ppos+len-1.
1787          */
1788
1789         return cifs_user_writev(iocb, iov, nr_segs, pos);
1790 }
1791
/*
 * Read up to iov_length(iov, nr_segs) bytes from the server starting
 * at *poffset and scatter them into the user iovecs.  Data is pulled
 * in rsize-sized chunks with CIFSSMBRead and copied out of the SMB
 * response buffer with memcpy_toiovecend.  Returns the number of
 * bytes read (possibly short), or a negative error if nothing was
 * read; *poffset is advanced past the bytes consumed.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;	/* bytes of the iovecs already filled */
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	char *read_data;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* retry loop: reopen a stale handle and resend on -EAGAIN */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
					 cur_len, *poffset, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/* payload sits past the 4-byte RFC1001
				   header at the server-supplied offset */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* release the response buffer according to
				   which pool it came from */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;	/* return the short read */
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
1871
1872 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1873                        size_t read_size, loff_t *poffset)
1874 {
1875         struct iovec iov;
1876         iov.iov_base = read_data;
1877         iov.iov_len = read_size;
1878
1879         return cifs_iovec_read(file, &iov, 1, poffset);
1880 }
1881
1882 static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
1883                                unsigned long nr_segs, loff_t pos)
1884 {
1885         ssize_t read;
1886
1887         read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1888         if (read > 0)
1889                 iocb->ki_pos = pos;
1890
1891         return read;
1892 }
1893
1894 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1895                           unsigned long nr_segs, loff_t pos)
1896 {
1897         struct inode *inode;
1898
1899         inode = iocb->ki_filp->f_path.dentry->d_inode;
1900
1901         if (CIFS_I(inode)->clientCanCacheRead)
1902                 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1903
1904         /*
1905          * In strict cache mode we need to read from the server all the time
1906          * if we don't have level II oplock because the server can delay mtime
1907          * change - so we can't make a decision about inode invalidating.
1908          * And we can also fail with pagereading if there are mandatory locks
1909          * on pages affected by this read but not on the region from pos to
1910          * pos+len-1.
1911          */
1912
1913         return cifs_user_readv(iocb, iov, nr_segs, pos);
1914 }
1915
1916 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1917                          loff_t *poffset)
1918 {
1919         int rc = -EACCES;
1920         unsigned int bytes_read = 0;
1921         unsigned int total_read;
1922         unsigned int current_read_size;
1923         struct cifs_sb_info *cifs_sb;
1924         struct cifsTconInfo *pTcon;
1925         int xid;
1926         char *current_offset;
1927         struct cifsFileInfo *open_file;
1928         int buf_type = CIFS_NO_BUFFER;
1929
1930         xid = GetXid();
1931         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1932
1933         if (file->private_data == NULL) {
1934                 rc = -EBADF;
1935                 FreeXid(xid);
1936                 return rc;
1937         }
1938         open_file = file->private_data;
1939         pTcon = tlink_tcon(open_file->tlink);
1940
1941         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1942                 cFYI(1, "attempting read on write only file instance");
1943
1944         for (total_read = 0, current_offset = read_data;
1945              read_size > total_read;
1946              total_read += bytes_read, current_offset += bytes_read) {
1947                 current_read_size = min_t(const int, read_size - total_read,
1948                                           cifs_sb->rsize);
1949                 /* For windows me and 9x we do not want to request more
1950                 than it negotiated since it will refuse the read then */
1951                 if ((pTcon->ses) &&
1952                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1953                         current_read_size = min_t(const int, current_read_size,
1954                                         pTcon->ses->server->maxBuf - 128);
1955                 }
1956                 rc = -EAGAIN;
1957                 while (rc == -EAGAIN) {
1958                         if (open_file->invalidHandle) {
1959                                 rc = cifs_reopen_file(open_file, true);
1960                                 if (rc != 0)
1961                                         break;
1962                         }
1963                         rc = CIFSSMBRead(xid, pTcon,
1964                                          open_file->netfid,
1965                                          current_read_size, *poffset,
1966                                          &bytes_read, &current_offset,
1967                                          &buf_type);
1968                 }
1969                 if (rc || (bytes_read == 0)) {
1970                         if (total_read) {
1971                                 break;
1972                         } else {
1973                                 FreeXid(xid);
1974                                 return rc;
1975                         }
1976                 } else {
1977                         cifs_stats_bytes_read(pTcon, total_read);
1978                         *poffset += bytes_read;
1979                 }
1980         }
1981         FreeXid(xid);
1982         return total_read;
1983 }
1984
1985 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1986 {
1987         int rc, xid;
1988         struct inode *inode = file->f_path.dentry->d_inode;
1989
1990         xid = GetXid();
1991
1992         if (!CIFS_I(inode)->clientCanCacheRead)
1993                 cifs_invalidate_mapping(inode);
1994
1995         rc = generic_file_mmap(file, vma);
1996         FreeXid(xid);
1997         return rc;
1998 }
1999
/*
 * mmap: revalidate the file against the server first; map it with the
 * generic path only if revalidation succeeds.
 */
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int xid = GetXid();
	int rc = cifs_revalidate_file(file);

	if (rc)
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
	else
		rc = generic_file_mmap(file, vma);

	FreeXid(xid);
	return rc;
}
2015
2016
/*
 * Copy bytes_read bytes of response data into pages taken from the
 * tail of the readahead list, inserting each page into the page
 * cache and marking it uptodate.  A partial final page is
 * zero-filled past the data; each filled page is also pushed to
 * FS-Cache.  Stops early if the page list runs out.
 */
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index,
				      GFP_KERNEL)) {
			/* couldn't insert this page: drop it but still
			   skip over its slice of the data */
			page_cache_release(page);
			cFYI(1, "Add page cache failed");
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}
		/* drop the list's reference; the page cache now holds one */
		page_cache_release(page);

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		data += PAGE_CACHE_SIZE;

		/* add page to FS-Cache */
		cifs_readpage_to_fscache(mapping->host, page);
	}
	return;
}
2064
/*
 * .readpages: populate the readahead page list with data from the
 * server.  Pages satisfiable from FS-Cache are handled there first;
 * the remainder are read in runs of contiguous page indexes (one SMB
 * read per run, bounded by rsize rounded down to a page multiple)
 * and copied into the page cache by cifs_copy_cache_pages().
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		/* list tail holds the lowest index; that sets the
		   starting file offset for this run */
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i >  num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		/* retry loop: reopen a stale handle and resend on -EAGAIN */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				/* drop the stale response before retrying */
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i +=  bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld . "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		/* done with this chunk's response buffer */
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
2213
/*
 * Fill a single page: try FS-Cache first, otherwise read up to
 * PAGE_CACHE_SIZE bytes from the server via cifs_read().  The tail
 * of a short read is zero-filled and the page marked uptodate, and
 * the page is offered to FS-Cache.  Returns 0 on success or the
 * negative error from cifs_read.  Does not unlock the page; the
 * caller does that.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the remainder of a short read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);	/* balance page_cache_get above */

read_complete:
	return rc;
}
2257
2258 static int cifs_readpage(struct file *file, struct page *page)
2259 {
2260         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2261         int rc = -EACCES;
2262         int xid;
2263
2264         xid = GetXid();
2265
2266         if (file->private_data == NULL) {
2267                 rc = -EBADF;
2268                 FreeXid(xid);
2269                 return rc;
2270         }
2271
2272         cFYI(1, "readpage %p at offset %d 0x%x\n",
2273                  page, (int)offset, (int)offset);
2274
2275         rc = cifs_readpage_worker(file, page, &offset);
2276
2277         unlock_page(page);
2278
2279         FreeXid(xid);
2280         return rc;
2281 }
2282
2283 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2284 {
2285         struct cifsFileInfo *open_file;
2286
2287         spin_lock(&cifs_file_list_lock);
2288         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2289                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2290                         spin_unlock(&cifs_file_list_lock);
2291                         return 1;
2292                 }
2293         }
2294         spin_unlock(&cifs_file_list_lock);
2295         return 0;
2296 }
2297
2298 /* We do not want to update the file size from server for inodes
2299    open for write - to avoid races with writepage extending
2300    the file - in the future we could consider allowing
2301    refreshing the inode only on increases in the file size
2302    but this is tricky to do without racing with writebehind
2303    page caching in the current Linux kernel design */
2304 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2305 {
2306         if (!cifsInode)
2307                 return true;
2308
2309         if (is_inode_writable(cifsInode)) {
2310                 /* This inode is open for write at least once */
2311                 struct cifs_sb_info *cifs_sb;
2312
2313                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2314                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2315                         /* since no page cache to corrupt on directio
2316                         we can change size safely */
2317                         return true;
2318                 }
2319
2320                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2321                         return true;
2322
2323                 return false;
2324         } else
2325                 return true;
2326 }
2327
/*
 * ->write_begin handler: grab (and lock) the page cache page covering
 * [pos, pos+len) and decide whether it must be read from the server
 * before the copy-in. On success *pagep holds the locked page and 0 is
 * returned; -ENOMEM if no page could be grabbed. A page deliberately
 * left !uptodate (and !PageChecked) makes cifs_write_end fall back to a
 * synchronous write of just the copied range.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;	/* page index of pos */
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);	/* write start within page */
	loff_t page_start = pos & PAGE_MASK;		/* file offset of page start */
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	/* cached copy already current - nothing more to do */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page outside the write */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2399
2400 static int cifs_release_page(struct page *page, gfp_t gfp)
2401 {
2402         if (PagePrivate(page))
2403                 return 0;
2404
2405         return cifs_fscache_release_page(page, gfp);
2406 }
2407
2408 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2409 {
2410         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2411
2412         if (offset == 0)
2413                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2414 }
2415
/*
 * Work item run when the server breaks an oplock on one of our open
 * files: break any local lease, write back dirty pages, drop cached
 * data if read caching was revoked, then acknowledge the break to the
 * server. Drops the reference taken by cifs_oplock_break_get().
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* break a local lease to match the server-side break */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching revoked: wait for writeback and
			   drop the now-untrusted cached pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing reference for us.  Make sure it's done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}
2461
2462 /* must be called while holding cifs_file_list_lock */
2463 void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2464 {
2465         cifs_sb_active(cfile->dentry->d_sb);
2466         cifsFileInfo_get(cfile);
2467 }
2468
/*
 * Release the references taken by cifs_oplock_break_get(). The
 * superblock pointer must be saved before cifsFileInfo_put(), which
 * may drop the last reference and free cfile (and its dentry).
 */
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}
2476
/*
 * Address space operations used when the server's buffer size is large
 * enough for multi-page reads (includes cifs_readpages); compare
 * cifs_addr_ops_smallbuf below.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	/* .direct_IO = */
};
2489
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,	/* .readpages deliberately omitted */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	/* .direct_IO = */
};