1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
36 #include "cifsfs.h"
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "fscache.h"
44
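/* Map the open(2) access mode in f_flags to the desired access bits
   requested in the SMB open call */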
45 static inline int cifs_convert_flags(unsigned int flags)
46 {
47         if ((flags & O_ACCMODE) == O_RDONLY)
48                 return GENERIC_READ;
49         else if ((flags & O_ACCMODE) == O_WRONLY)
50                 return GENERIC_WRITE;
51         else if ((flags & O_ACCMODE) == O_RDWR) {
52                 /* GENERIC_ALL is too much permission to request; it can
53                    cause an unnecessary access denied error on create */
54                 /* return GENERIC_ALL; */
55                 return (GENERIC_READ | GENERIC_WRITE);
56         }
57
58         return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59                 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60                 FILE_READ_DATA);
61 }
62
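/* Translate open(2) flags into the SMB_O_* flags used by the POSIX
   open/create call */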
63 static u32 cifs_posix_convert_flags(unsigned int flags)
64 {
65         u32 posix_flags = 0;
66
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 posix_flags = SMB_O_RDONLY;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 posix_flags = SMB_O_WRONLY;
71         else if ((flags & O_ACCMODE) == O_RDWR)
72                 posix_flags = SMB_O_RDWR;
73
74         if (flags & O_CREAT)
75                 posix_flags |= SMB_O_CREAT;
76         if (flags & O_EXCL)
77                 posix_flags |= SMB_O_EXCL;
78         if (flags & O_TRUNC)
79                 posix_flags |= SMB_O_TRUNC;
80         /* be safe and imply O_SYNC for O_DSYNC */
81         if (flags & O_DSYNC)
82                 posix_flags |= SMB_O_SYNC;
83         if (flags & O_DIRECTORY)
84                 posix_flags |= SMB_O_DIRECTORY;
85         if (flags & O_NOFOLLOW)
86                 posix_flags |= SMB_O_NOFOLLOW;
87         if (flags & O_DIRECT)
88                 posix_flags |= SMB_O_DIRECT;
89
90         return posix_flags;
91 }
92
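/* Pick the CIFS create disposition matching the O_CREAT / O_EXCL / O_TRUNC
   combination (see the mapping table in cifs_open below) */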
93 static inline int cifs_get_disposition(unsigned int flags)
94 {
95         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96                 return FILE_CREATE;
97         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98                 return FILE_OVERWRITE_IF;
99         else if ((flags & O_CREAT) == O_CREAT)
100                 return FILE_OPEN_IF;
101         else if ((flags & O_TRUNC) == O_TRUNC)
102                 return FILE_OVERWRITE;
103         else
104                 return FILE_OPEN;
105 }
106
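/* After a successful open: unless we already hold a read oplock, write back
   and invalidate cached pages if the file changed on the server; then
   refresh the inode info and record what the granted oplock lets us cache */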
107 static inline int cifs_open_inode_helper(struct inode *inode,
108         struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
109         char *full_path, int xid)
110 {
111         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
112         struct timespec temp;
113         int rc;
114
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else;
117                    no need to discard cache data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(buf->LastWriteTime);
125         if (timespec_equal(&inode->i_mtime, &temp) &&
126                            (inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, "inode unchanged on server");
129         } else {
130                 if (inode->i_mapping) {
131                         /* BB no need to lock inode until after invalidate
132                         since namei code should already have it locked? */
133                         rc = filemap_write_and_wait(inode->i_mapping);
134                         if (rc != 0)
135                                 pCifsInode->write_behind_rc = rc;
136                 }
137                 cFYI(1, "invalidating remote inode since open detected it "
138                          "changed");
139                 invalidate_remote_inode(inode);
140         }
141
142 client_can_cache:
143         if (pTcon->unix_ext)
144                 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
145                                               xid);
146         else
147                 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
148                                          xid, NULL);
149
150         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
151                 pCifsInode->clientCanCacheAll = true;
152                 pCifsInode->clientCanCacheRead = true;
153                 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
154         } else if ((oplock & 0xF) == OPLOCK_READ)
155                 pCifsInode->clientCanCacheRead = true;
156
157         return rc;
158 }
159
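/* Open a file via the SMB POSIX extensions and, if the caller passed in an
   inode pointer, instantiate or update that inode from the returned
   attributes */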
160 int cifs_posix_open(char *full_path, struct inode **pinode,
161                         struct super_block *sb, int mode, unsigned int f_flags,
162                         __u32 *poplock, __u16 *pnetfid, int xid)
163 {
164         int rc;
165         FILE_UNIX_BASIC_INFO *presp_data;
166         __u32 posix_flags = 0;
167         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
168         struct cifs_fattr fattr;
169         struct tcon_link *tlink;
170         struct cifsTconInfo *tcon;
171
172         cFYI(1, "posix open %s", full_path);
173
174         presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
175         if (presp_data == NULL)
176                 return -ENOMEM;
177
178         tlink = cifs_sb_tlink(cifs_sb);
179         if (IS_ERR(tlink)) {
180                 rc = PTR_ERR(tlink);
181                 goto posix_open_ret;
182         }
183
184         tcon = tlink_tcon(tlink);
185         mode &= ~current_umask();
186
187         posix_flags = cifs_posix_convert_flags(f_flags);
188         rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
189                              poplock, full_path, cifs_sb->local_nls,
190                              cifs_sb->mnt_cifs_flags &
191                                         CIFS_MOUNT_MAP_SPECIAL_CHR);
192         cifs_put_tlink(tlink);
193
194         if (rc)
195                 goto posix_open_ret;
196
197         if (presp_data->Type == cpu_to_le32(-1))
198                 goto posix_open_ret; /* open ok, caller does qpathinfo */
199
200         if (!pinode)
201                 goto posix_open_ret; /* caller does not need info */
202
203         cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
204
205         /* get new inode and set it up */
206         if (*pinode == NULL) {
207                 cifs_fill_uniqueid(sb, &fattr);
208                 *pinode = cifs_iget(sb, &fattr);
209                 if (!*pinode) {
210                         rc = -ENOMEM;
211                         goto posix_open_ret;
212                 }
213         } else {
214                 cifs_fattr_to_inode(*pinode, &fattr);
215         }
216
217 posix_open_ret:
218         kfree(presp_data);
219         return rc;
220 }
221
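/* Allocate the per-open cifsFileInfo, link it into the tcon and inode
   open-file lists, and note what the granted oplock lets us cache */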
222 struct cifsFileInfo *
223 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
224                   struct tcon_link *tlink, __u32 oplock)
225 {
226         struct dentry *dentry = file->f_path.dentry;
227         struct inode *inode = dentry->d_inode;
228         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
229         struct cifsFileInfo *pCifsFile;
230
231         pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
232         if (pCifsFile == NULL)
233                 return pCifsFile;
234
235         pCifsFile->netfid = fileHandle;
236         pCifsFile->pid = current->tgid;
237         pCifsFile->uid = current_fsuid();
238         pCifsFile->dentry = dget(dentry);
239         pCifsFile->f_flags = file->f_flags;
240         pCifsFile->invalidHandle = false;
241         pCifsFile->tlink = cifs_get_tlink(tlink);
242         mutex_init(&pCifsFile->fh_mutex);
243         mutex_init(&pCifsFile->lock_mutex);
244         INIT_LIST_HEAD(&pCifsFile->llist);
245         atomic_set(&pCifsFile->count, 1);
246         INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
247
248         spin_lock(&cifs_file_list_lock);
249         list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
250         /* if readable file instance, put it first in the list */
251         if (file->f_mode & FMODE_READ)
252                 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
253         else
254                 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
255         spin_unlock(&cifs_file_list_lock);
256
257         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
258                 pCifsInode->clientCanCacheAll = true;
259                 pCifsInode->clientCanCacheRead = true;
260                 cFYI(1, "Exclusive Oplock inode %p", inode);
261         } else if ((oplock & 0xF) == OPLOCK_READ)
262                 pCifsInode->clientCanCacheRead = true;
263
264         file->private_data = pCifsFile;
265         return pCifsFile;
266 }
267
268 /*
269  * Release a reference on the file private data. This may involve closing
270  * the filehandle out on the server.
271  */
272 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
273 {
274         struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
275         struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
276         struct cifsLockInfo *li, *tmp;
277
278         spin_lock(&cifs_file_list_lock);
279         if (!atomic_dec_and_test(&cifs_file->count)) {
280                 spin_unlock(&cifs_file_list_lock);
281                 return;
282         }
283
284         /* remove it from the lists */
285         list_del(&cifs_file->flist);
286         list_del(&cifs_file->tlist);
287
288         if (list_empty(&cifsi->openFileList)) {
289                 cFYI(1, "closing last open instance for inode %p",
290                         cifs_file->dentry->d_inode);
291                 cifsi->clientCanCacheRead = false;
292                 cifsi->clientCanCacheAll  = false;
293         }
294         spin_unlock(&cifs_file_list_lock);
295
296         if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
297                 int xid, rc;
298
299                 xid = GetXid();
300                 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
301                 FreeXid(xid);
302         }
303
304         /* Delete any outstanding lock records. We'll lose them when the file
305          * is closed anyway.
306          */
307         mutex_lock(&cifs_file->lock_mutex);
308         list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
309                 list_del(&li->llist);
310                 kfree(li);
311         }
312         mutex_unlock(&cifs_file->lock_mutex);
313
314         cifs_put_tlink(cifs_file->tlink);
315         dput(cifs_file->dentry);
316         kfree(cifs_file);
317 }
318
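/* ->open() for regular files: prefer a POSIX open when the server supports
   POSIX path operations, otherwise fall back to the standard SMB open (or
   the legacy OpenX for very old servers) */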
319 int cifs_open(struct inode *inode, struct file *file)
320 {
321         int rc = -EACCES;
322         int xid;
323         __u32 oplock;
324         struct cifs_sb_info *cifs_sb;
325         struct cifsTconInfo *tcon;
326         struct tcon_link *tlink;
327         struct cifsFileInfo *pCifsFile = NULL;
328         struct cifsInodeInfo *pCifsInode;
329         char *full_path = NULL;
330         int desiredAccess;
331         int disposition;
332         __u16 netfid;
333         FILE_ALL_INFO *buf = NULL;
334
335         xid = GetXid();
336
337         cifs_sb = CIFS_SB(inode->i_sb);
338         tlink = cifs_sb_tlink(cifs_sb);
339         if (IS_ERR(tlink)) {
340                 FreeXid(xid);
341                 return PTR_ERR(tlink);
342         }
343         tcon = tlink_tcon(tlink);
344
345         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
346
347         full_path = build_path_from_dentry(file->f_path.dentry);
348         if (full_path == NULL) {
349                 rc = -ENOMEM;
350                 goto out;
351         }
352
353         cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
354                  inode, file->f_flags, full_path);
355
356         if (oplockEnabled)
357                 oplock = REQ_OPLOCK;
358         else
359                 oplock = 0;
360
361         if (!tcon->broken_posix_open && tcon->unix_ext &&
362             (tcon->ses->capabilities & CAP_UNIX) &&
363             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
364                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
365                 /* can not refresh inode info since size could be stale */
366                 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
367                                 cifs_sb->mnt_file_mode /* ignored */,
368                                 file->f_flags, &oplock, &netfid, xid);
369                 if (rc == 0) {
370                         cFYI(1, "posix open succeeded");
371
372                         pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
373                                                       oplock);
374                         if (pCifsFile == NULL) {
375                                 CIFSSMBClose(xid, tcon, netfid);
376                                 rc = -ENOMEM;
377                         }
378
379                         cifs_fscache_set_inode_cookie(inode, file);
380
381                         goto out;
382                 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
383                         if (tcon->ses->serverNOS)
384                                 cERROR(1, "server %s of type %s returned"
385                                            " unexpected error on SMB posix open"
386                                            ", disabling posix open support."
387                                            " Check if server update available.",
388                                            tcon->ses->serverName,
389                                            tcon->ses->serverNOS);
390                         tcon->broken_posix_open = true;
391                 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
392                          (rc != -EOPNOTSUPP)) /* path not found or net err */
393                         goto out;
394                 /* else fallthrough to retry open the old way on network i/o
395                    or DFS errors */
396         }
397
398         desiredAccess = cifs_convert_flags(file->f_flags);
399
400 /*********************************************************************
401  *  open flag mapping table:
402  *
403  *      POSIX Flag            CIFS Disposition
404  *      ----------            ----------------
405  *      O_CREAT               FILE_OPEN_IF
406  *      O_CREAT | O_EXCL      FILE_CREATE
407  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
408  *      O_TRUNC               FILE_OVERWRITE
409  *      none of the above     FILE_OPEN
410  *
411  *      Note that there is no direct match for the disposition
412  *      FILE_SUPERSEDE (ie create whether or not the file exists);
413  *      O_CREAT | O_TRUNC is similar, but it truncates an existing
414  *      file rather than creating a new one as FILE_SUPERSEDE does
415  *      (which uses the attributes / metadata passed in on the open call).
416  *
417  *      O_SYNC is a reasonable match to the CIFS writethrough flag
418  *      and the read/write flags match reasonably.  O_LARGEFILE
419  *      is irrelevant because largefile support is always used
420  *      by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
421  *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
422  *********************************************************************/
423
424         disposition = cifs_get_disposition(file->f_flags);
425
426         /* BB pass O_SYNC flag through on file attributes .. BB */
427
428         /* Also refresh inode by passing in file_info buf returned by SMBOpen
429            and calling get_inode_info with returned buf (at least helps
430            non-Unix server case) */
431
432         /* BB we can not do this if this is the second open of a file
433            and the first handle has writebehind data, we might be
434            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
435         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
436         if (!buf) {
437                 rc = -ENOMEM;
438                 goto out;
439         }
440
441         if (tcon->ses->capabilities & CAP_NT_SMBS)
442                 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
443                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
444                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
445                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
446         else
447                 rc = -EIO; /* no NT SMB support fall into legacy open below */
448
449         if (rc == -EIO) {
450                 /* Old server, try legacy style OpenX */
451                 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
452                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
453                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
454                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
455         }
456         if (rc) {
457                 cFYI(1, "cifs_open returned 0x%x", rc);
458                 goto out;
459         }
460
461         rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
462         if (rc != 0)
463                 goto out;
464
465         pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
466         if (pCifsFile == NULL) {
467                 rc = -ENOMEM;
468                 goto out;
469         }
470
471         cifs_fscache_set_inode_cookie(inode, file);
472
473         if (oplock & CIFS_CREATE_ACTION) {
474                 /* time to set mode which we can not set earlier due to
475                    problems creating new read-only files */
476                 if (tcon->unix_ext) {
477                         struct cifs_unix_set_info_args args = {
478                                 .mode   = inode->i_mode,
479                                 .uid    = NO_CHANGE_64,
480                                 .gid    = NO_CHANGE_64,
481                                 .ctime  = NO_CHANGE_64,
482                                 .atime  = NO_CHANGE_64,
483                                 .mtime  = NO_CHANGE_64,
484                                 .device = 0,
485                         };
486                         CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
487                                                cifs_sb->local_nls,
488                                                cifs_sb->mnt_cifs_flags &
489                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
490                 }
491         }
492
493 out:
494         kfree(buf);
495         kfree(full_path);
496         FreeXid(xid);
497         cifs_put_tlink(tlink);
498         return rc;
499 }
500
501 /* Try to reacquire byte range locks that were released when the session
502    to the server was lost */
503 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
504 {
505         int rc = 0;
506
507 /* BB list all locks open on this file and relock */
508
509         return rc;
510 }
511
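/* Re-establish a server file handle after it has been invalidated (e.g. by
   a reconnect); when it is safe to do so, also flush cached data and
   refresh the inode info */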
512 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
513 {
514         int rc = -EACCES;
515         int xid;
516         __u32 oplock;
517         struct cifs_sb_info *cifs_sb;
518         struct cifsTconInfo *tcon;
519         struct cifsInodeInfo *pCifsInode;
520         struct inode *inode;
521         char *full_path = NULL;
522         int desiredAccess;
523         int disposition = FILE_OPEN;
524         __u16 netfid;
525
526         xid = GetXid();
527         mutex_lock(&pCifsFile->fh_mutex);
528         if (!pCifsFile->invalidHandle) {
529                 mutex_unlock(&pCifsFile->fh_mutex);
530                 rc = 0;
531                 FreeXid(xid);
532                 return rc;
533         }
534
535         inode = pCifsFile->dentry->d_inode;
536         cifs_sb = CIFS_SB(inode->i_sb);
537         tcon = tlink_tcon(pCifsFile->tlink);
538
539 /* We can not grab the rename sem here, because various ops (including
540    those that already hold the rename sem) can end up causing writepage
541    to get called; if the server was down, that means we end up here and
542    we can never tell if the caller already holds the rename_sem */
543         full_path = build_path_from_dentry(pCifsFile->dentry);
544         if (full_path == NULL) {
545                 rc = -ENOMEM;
546                 mutex_unlock(&pCifsFile->fh_mutex);
547                 FreeXid(xid);
548                 return rc;
549         }
550
551         cFYI(1, "inode = 0x%p file flags 0x%x for %s",
552                  inode, pCifsFile->f_flags, full_path);
553
554         if (oplockEnabled)
555                 oplock = REQ_OPLOCK;
556         else
557                 oplock = 0;
558
559         if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
560             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
561                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
562
563                 /*
564                  * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
565                  * original open. Must mask them off for a reopen.
566                  */
567                 unsigned int oflags = pCifsFile->f_flags &
568                                                 ~(O_CREAT | O_EXCL | O_TRUNC);
569
570                 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
571                                 cifs_sb->mnt_file_mode /* ignored */,
572                                 oflags, &oplock, &netfid, xid);
573                 if (rc == 0) {
574                         cFYI(1, "posix reopen succeeded");
575                         goto reopen_success;
576                 }
577                 /* fallthrough to retry open the old way on errors, especially
578                    in the reconnect path it is important to retry hard */
579         }
580
581         desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
582
583         /* Can not refresh inode by passing in file_info buf to be returned
584            by SMBOpen and then calling get_inode_info with returned buf
585            since file might have write behind data that needs to be flushed
586            and server version of file size can be stale. If we knew for sure
587            that inode was not dirty locally we could do this */
588
589         rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
590                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
591                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
592                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
593         if (rc) {
594                 mutex_unlock(&pCifsFile->fh_mutex);
595                 cFYI(1, "cifs_open returned 0x%x", rc);
596                 cFYI(1, "oplock: %d", oplock);
597                 goto reopen_error_exit;
598         }
599
600 reopen_success:
601         pCifsFile->netfid = netfid;
602         pCifsFile->invalidHandle = false;
603         mutex_unlock(&pCifsFile->fh_mutex);
604         pCifsInode = CIFS_I(inode);
605
606         if (can_flush) {
607                 rc = filemap_write_and_wait(inode->i_mapping);
608                 if (rc != 0)
609                         CIFS_I(inode)->write_behind_rc = rc;
610
611                 pCifsInode->clientCanCacheAll = false;
612                 pCifsInode->clientCanCacheRead = false;
613                 if (tcon->unix_ext)
614                         rc = cifs_get_inode_info_unix(&inode,
615                                 full_path, inode->i_sb, xid);
616                 else
617                         rc = cifs_get_inode_info(&inode,
618                                 full_path, NULL, inode->i_sb,
619                                 xid, NULL);
620         } /* else we are writing out data to server already
621              and could deadlock if we tried to flush data, and
622              since we do not know if we have data that would
623              invalidate the current end of file on the server
624              we can not go to the server to get the new inode
625              info */
626         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
627                 pCifsInode->clientCanCacheAll = true;
628                 pCifsInode->clientCanCacheRead = true;
629                 cFYI(1, "Exclusive Oplock granted on inode %p",
630                          pCifsFile->dentry->d_inode);
631         } else if ((oplock & 0xF) == OPLOCK_READ) {
632                 pCifsInode->clientCanCacheRead = true;
633                 pCifsInode->clientCanCacheAll = false;
634         } else {
635                 pCifsInode->clientCanCacheRead = false;
636                 pCifsInode->clientCanCacheAll = false;
637         }
638         cifs_relock_file(pCifsFile);
639
640 reopen_error_exit:
641         kfree(full_path);
642         FreeXid(xid);
643         return rc;
644 }
645
646 int cifs_close(struct inode *inode, struct file *file)
647 {
648         cifsFileInfo_put(file->private_data);
649         file->private_data = NULL;
650
651         /* return code from the ->release op is always ignored */
652         return 0;
653 }
654
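/* ->release() for directories: close any in-progress server-side search and
   free the buffered search results */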
655 int cifs_closedir(struct inode *inode, struct file *file)
656 {
657         int rc = 0;
658         int xid;
659         struct cifsFileInfo *pCFileStruct = file->private_data;
660         char *ptmp;
661
662         cFYI(1, "Closedir inode = 0x%p", inode);
663
664         xid = GetXid();
665
666         if (pCFileStruct) {
667                 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
668
669                 cFYI(1, "Freeing private data in close dir");
670                 spin_lock(&cifs_file_list_lock);
671                 if (!pCFileStruct->srch_inf.endOfSearch &&
672                     !pCFileStruct->invalidHandle) {
673                         pCFileStruct->invalidHandle = true;
674                         spin_unlock(&cifs_file_list_lock);
675                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
676                         cFYI(1, "Closing uncompleted readdir with rc %d",
677                                  rc);
678                         /* not much we can do if it fails anyway, ignore rc */
679                         rc = 0;
680                 } else
681                         spin_unlock(&cifs_file_list_lock);
682                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
683                 if (ptmp) {
684                         cFYI(1, "closedir free smb buf in srch struct");
685                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
686                         if (pCFileStruct->srch_inf.smallBuf)
687                                 cifs_small_buf_release(ptmp);
688                         else
689                                 cifs_buf_release(ptmp);
690                 }
691                 cifs_put_tlink(pCFileStruct->tlink);
692                 kfree(file->private_data);
693                 file->private_data = NULL;
694         }
695         /* BB can we lock the filestruct while this is going on? */
696         FreeXid(xid);
697         return rc;
698 }
699
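/* Remember a byte-range lock on the open file's lock list so later unlocks
   (and the final close) can find and release it */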
700 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
701                                 __u64 offset, __u8 lockType)
702 {
703         struct cifsLockInfo *li =
704                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
705         if (li == NULL)
706                 return -ENOMEM;
707         li->offset = offset;
708         li->length = len;
709         li->type = lockType;
710         mutex_lock(&fid->lock_mutex);
711         list_add(&li->llist, &fid->llist);
712         mutex_unlock(&fid->lock_mutex);
713         return 0;
714 }
715
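/* ->lock() handler: translate fcntl byte-range lock requests into POSIX or
   Windows-style SMB lock calls, depending on what the server supports */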
716 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
717 {
718         int rc, xid;
719         __u32 numLock = 0;
720         __u32 numUnlock = 0;
721         __u64 length;
722         bool wait_flag = false;
723         struct cifs_sb_info *cifs_sb;
724         struct cifsTconInfo *tcon;
725         __u16 netfid;
726         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
727         bool posix_locking = 0;
728
729         length = 1 + pfLock->fl_end - pfLock->fl_start;
730         rc = -EACCES;
731         xid = GetXid();
732
733         cFYI(1, "Lock parm: 0x%x flockflags: "
734                  "0x%x flocktype: 0x%x start: %lld end: %lld",
735                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
736                 pfLock->fl_end);
737
738         if (pfLock->fl_flags & FL_POSIX)
739                 cFYI(1, "Posix");
740         if (pfLock->fl_flags & FL_FLOCK)
741                 cFYI(1, "Flock");
742         if (pfLock->fl_flags & FL_SLEEP) {
743                 cFYI(1, "Blocking lock");
744                 wait_flag = true;
745         }
746         if (pfLock->fl_flags & FL_ACCESS)
747                 cFYI(1, "Process suspended by mandatory locking - "
748                          "not implemented yet");
749         if (pfLock->fl_flags & FL_LEASE)
750                 cFYI(1, "Lease on file - not implemented yet");
751         if (pfLock->fl_flags &
752             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
753                 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
754
755         if (pfLock->fl_type == F_WRLCK) {
756                 cFYI(1, "F_WRLCK ");
757                 numLock = 1;
758         } else if (pfLock->fl_type == F_UNLCK) {
759                 cFYI(1, "F_UNLCK");
760                 numUnlock = 1;
761                 /* Check if unlock includes more than
762                 one lock range */
763         } else if (pfLock->fl_type == F_RDLCK) {
764                 cFYI(1, "F_RDLCK");
765                 lockType |= LOCKING_ANDX_SHARED_LOCK;
766                 numLock = 1;
767         } else if (pfLock->fl_type == F_EXLCK) {
768                 cFYI(1, "F_EXLCK");
769                 numLock = 1;
770         } else if (pfLock->fl_type == F_SHLCK) {
771                 cFYI(1, "F_SHLCK");
772                 lockType |= LOCKING_ANDX_SHARED_LOCK;
773                 numLock = 1;
774         } else
775                 cFYI(1, "Unknown type of lock");
776
777         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
778
779         if (file->private_data == NULL) {
780                 rc = -EBADF;
781                 FreeXid(xid);
782                 return rc;
783         }
784         tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
785         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
786
787         if ((tcon->ses->capabilities & CAP_UNIX) &&
788             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
789             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
790                 posix_locking = 1;
791         /* BB add code here to normalize offset and length to
792         account for negative length which we can not accept over the
793         wire */
794         if (IS_GETLK(cmd)) {
795                 if (posix_locking) {
796                         int posix_lock_type;
797                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
798                                 posix_lock_type = CIFS_RDLCK;
799                         else
800                                 posix_lock_type = CIFS_WRLCK;
801                         rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
802                                         length, pfLock,
803                                         posix_lock_type, wait_flag);
804                         FreeXid(xid);
805                         return rc;
806                 }
807
808                 /* BB we could chain these into one lock request BB */
809                 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
810                                  0, 1, lockType, 0 /* wait flag */ );
811                 if (rc == 0) {
812                         rc = CIFSSMBLock(xid, tcon, netfid, length,
813                                          pfLock->fl_start, 1 /* numUnlock */ ,
814                                          0 /* numLock */ , lockType,
815                                          0 /* wait flag */ );
816                         pfLock->fl_type = F_UNLCK;
817                         if (rc != 0)
818                                 cERROR(1, "Error unlocking previously locked "
819                                            "range %d during test of lock", rc);
820                         rc = 0;
821
822                 } else {
823                         /* if rc == ERR_SHARING_VIOLATION ? */
824                         rc = 0;
825
826                         if (lockType & LOCKING_ANDX_SHARED_LOCK) {
827                                 pfLock->fl_type = F_WRLCK;
828                         } else {
829                                 rc = CIFSSMBLock(xid, tcon, netfid, length,
830                                         pfLock->fl_start, 0, 1,
831                                         lockType | LOCKING_ANDX_SHARED_LOCK,
832                                         0 /* wait flag */);
833                                 if (rc == 0) {
834                                         rc = CIFSSMBLock(xid, tcon, netfid,
835                                                 length, pfLock->fl_start, 1, 0,
836                                                 lockType |
837                                                 LOCKING_ANDX_SHARED_LOCK,
838                                                 0 /* wait flag */);
839                                         pfLock->fl_type = F_RDLCK;
840                                         if (rc != 0)
841                                                 cERROR(1, "Error unlocking "
842                                                 "previously locked range %d "
843                                                 "during test of lock", rc);
844                                         rc = 0;
845                                 } else {
846                                         pfLock->fl_type = F_WRLCK;
847                                         rc = 0;
848                                 }
849                         }
850                 }
851
852                 FreeXid(xid);
853                 return rc;
854         }
855
856         if (!numLock && !numUnlock) {
857                 /* if no lock or unlock then nothing
858                 to do since we do not know what it is */
859                 FreeXid(xid);
860                 return -EOPNOTSUPP;
861         }
862
863         if (posix_locking) {
864                 int posix_lock_type;
865                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
866                         posix_lock_type = CIFS_RDLCK;
867                 else
868                         posix_lock_type = CIFS_WRLCK;
869
870                 if (numUnlock == 1)
871                         posix_lock_type = CIFS_UNLCK;
872
873                 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
874                                       length, pfLock,
875                                       posix_lock_type, wait_flag);
876         } else {
877                 struct cifsFileInfo *fid = file->private_data;
878
879                 if (numLock) {
880                         rc = CIFSSMBLock(xid, tcon, netfid, length,
881                                         pfLock->fl_start,
882                                         0, numLock, lockType, wait_flag);
883
884                         if (rc == 0) {
885                                 /* For Windows locks we must store them. */
886                                 rc = store_file_lock(fid, length,
887                                                 pfLock->fl_start, lockType);
888                         }
889                 } else if (numUnlock) {
890                         /* For each stored lock that this unlock overlaps
891                            completely, unlock it. */
892                         int stored_rc = 0;
893                         struct cifsLockInfo *li, *tmp;
894
895                         rc = 0;
896                         mutex_lock(&fid->lock_mutex);
897                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
898                                 if (pfLock->fl_start <= li->offset &&
899                                                 (pfLock->fl_start + length) >=
900                                                 (li->offset + li->length)) {
901                                         stored_rc = CIFSSMBLock(xid, tcon,
902                                                         netfid,
903                                                         li->length, li->offset,
904                                                         1, 0, li->type, false);
905                                         if (stored_rc)
906                                                 rc = stored_rc;
907                                         else {
908                                                 list_del(&li->llist);
909                                                 kfree(li);
910                                         }
911                                 }
912                         }
913                         mutex_unlock(&fid->lock_mutex);
914                 }
915         }
916
917         if (pfLock->fl_flags & FL_POSIX)
918                 posix_lock_file_wait(file, pfLock);
919         FreeXid(xid);
920         return rc;
921 }
922
923 /*
924  * Set the timeout on write requests past EOF. For some servers (Windows)
925  * these calls can be very long.
926  *
927  * If we're writing >10M past the EOF we give a 180s timeout. Anything less
928  * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
929  * The 10M cutoff is totally arbitrary. A better scheme for this would be
930  * welcome if someone wants to suggest one.
931  *
932  * We may be able to do a better job with this if there were some way to
933  * declare that a file should be sparse.
934  */
935 static int
936 cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
937 {
938         if (offset <= cifsi->server_eof)
939                 return CIFS_STD_OP;
940         else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
941                 return CIFS_VLONG_OP;
942         else
943                 return CIFS_LONG_OP;
944 }
945
946 /* update the file size (if needed) after a write */
947 static void
948 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
949                       unsigned int bytes_written)
950 {
951         loff_t end_of_write = offset + bytes_written;
952
953         if (end_of_write > cifsi->server_eof)
954                 cifsi->server_eof = end_of_write;
955 }
956
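/* Write data from a user buffer to the server in chunks of at most wsize,
   reopening an invalidated handle as needed and updating the cached file
   size afterwards */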
957 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
958         size_t write_size, loff_t *poffset)
959 {
960         int rc = 0;
961         unsigned int bytes_written = 0;
962         unsigned int total_written;
963         struct cifs_sb_info *cifs_sb;
964         struct cifsTconInfo *pTcon;
965         int xid, long_op;
966         struct cifsFileInfo *open_file;
967         struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
968
969         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
970
971         /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
972            *poffset, file->f_path.dentry->d_name.name); */
973
974         if (file->private_data == NULL)
975                 return -EBADF;
976
977         open_file = file->private_data;
978         pTcon = tlink_tcon(open_file->tlink);
979
980         rc = generic_write_checks(file, poffset, &write_size, 0);
981         if (rc)
982                 return rc;
983
984         xid = GetXid();
985
986         long_op = cifs_write_timeout(cifsi, *poffset);
987         for (total_written = 0; write_size > total_written;
988              total_written += bytes_written) {
989                 rc = -EAGAIN;
990                 while (rc == -EAGAIN) {
991                         if (file->private_data == NULL) {
992                                 /* file has been closed on us */
993                                 FreeXid(xid);
994                         /* if we have gotten here we have written some data
995                            and blocked, and the file has been freed on us while
996                            we blocked so return what we managed to write */
997                                 return total_written;
998                         }
999                         if (open_file->invalidHandle) {
1000                                 /* we could deadlock if we called
1001                                    filemap_fdatawait from here so tell
1002                                    reopen_file not to flush data to server
1003                                    now */
1004                                 rc = cifs_reopen_file(open_file, false);
1005                                 if (rc != 0)
1006                                         break;
1007                         }
1008
1009                         rc = CIFSSMBWrite(xid, pTcon,
1010                                 open_file->netfid,
1011                                 min_t(const int, cifs_sb->wsize,
1012                                       write_size - total_written),
1013                                 *poffset, &bytes_written,
1014                                 NULL, write_data + total_written, long_op);
1015                 }
1016                 if (rc || (bytes_written == 0)) {
1017                         if (total_written)
1018                                 break;
1019                         else {
1020                                 FreeXid(xid);
1021                                 return rc;
1022                         }
1023                 } else {
1024                         cifs_update_eof(cifsi, *poffset, bytes_written);
1025                         *poffset += bytes_written;
1026                 }
1027                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1028                                     15 seconds is plenty */
1029         }
1030
1031         cifs_stats_bytes_written(pTcon, total_written);
1032
1033         /* since the write may have blocked check these pointers again */
1034         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1035                 struct inode *inode = file->f_path.dentry->d_inode;
1036 /* Do not update local mtime - server will set its actual value on write
1037  *              inode->i_ctime = inode->i_mtime =
1038  *                      current_fs_time(inode->i_sb);*/
1039                 if (total_written > 0) {
1040                         spin_lock(&inode->i_lock);
1041                         if (*poffset > file->f_path.dentry->d_inode->i_size)
1042                                 i_size_write(file->f_path.dentry->d_inode,
1043                                         *poffset);
1044                         spin_unlock(&inode->i_lock);
1045                 }
1046                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1047         }
1048         FreeXid(xid);
1049         return total_written;
1050 }
1051
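/* Like cifs_user_write(), but writes from a kernel buffer; used by the page
   writeback paths below */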
1052 static ssize_t cifs_write(struct cifsFileInfo *open_file,
1053                           const char *write_data, size_t write_size,
1054                           loff_t *poffset)
1055 {
1056         int rc = 0;
1057         unsigned int bytes_written = 0;
1058         unsigned int total_written;
1059         struct cifs_sb_info *cifs_sb;
1060         struct cifsTconInfo *pTcon;
1061         int xid, long_op;
1062         struct dentry *dentry = open_file->dentry;
1063         struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1064
1065         cifs_sb = CIFS_SB(dentry->d_sb);
1066
1067         cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1068            *poffset, dentry->d_name.name);
1069
1070         pTcon = tlink_tcon(open_file->tlink);
1071
1072         xid = GetXid();
1073
1074         long_op = cifs_write_timeout(cifsi, *poffset);
1075         for (total_written = 0; write_size > total_written;
1076              total_written += bytes_written) {
1077                 rc = -EAGAIN;
1078                 while (rc == -EAGAIN) {
1079                         if (open_file->invalidHandle) {
1080                                 /* we could deadlock if we called
1081                                    filemap_fdatawait from here so tell
1082                                    reopen_file not to flush data to
1083                                    server now */
1084                                 rc = cifs_reopen_file(open_file, false);
1085                                 if (rc != 0)
1086                                         break;
1087                         }
1088                         if (experimEnabled || (pTcon->ses->server &&
1089                                 ((pTcon->ses->server->secMode &
1090                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1091                                 == 0))) {
1092                                 struct kvec iov[2];
1093                                 unsigned int len;
1094
1095                                 len = min((size_t)cifs_sb->wsize,
1096                                           write_size - total_written);
1097                                 /* iov[0] is reserved for smb header */
1098                                 iov[1].iov_base = (char *)write_data +
1099                                                   total_written;
1100                                 iov[1].iov_len = len;
1101                                 rc = CIFSSMBWrite2(xid, pTcon,
1102                                                 open_file->netfid, len,
1103                                                 *poffset, &bytes_written,
1104                                                 iov, 1, long_op);
1105                         } else
1106                                 rc = CIFSSMBWrite(xid, pTcon,
1107                                          open_file->netfid,
1108                                          min_t(const int, cifs_sb->wsize,
1109                                                write_size - total_written),
1110                                          *poffset, &bytes_written,
1111                                          write_data + total_written,
1112                                          NULL, long_op);
1113                 }
1114                 if (rc || (bytes_written == 0)) {
1115                         if (total_written)
1116                                 break;
1117                         else {
1118                                 FreeXid(xid);
1119                                 return rc;
1120                         }
1121                 } else {
1122                         cifs_update_eof(cifsi, *poffset, bytes_written);
1123                         *poffset += bytes_written;
1124                 }
1125                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1126                                     15 seconds is plenty */
1127         }
1128
1129         cifs_stats_bytes_written(pTcon, total_written);
1130
1131         if (total_written > 0) {
1132                 spin_lock(&dentry->d_inode->i_lock);
1133                 if (*poffset > dentry->d_inode->i_size)
1134                         i_size_write(dentry->d_inode, *poffset);
1135                 spin_unlock(&dentry->d_inode->i_lock);
1136         }
1137         mark_inode_dirty_sync(dentry->d_inode);
1138         FreeXid(xid);
1139         return total_written;
1140 }
1141
1142 #ifdef CONFIG_CIFS_EXPERIMENTAL
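/* Find an open instance of this inode with read access and a still-valid
   handle, and take a reference on it */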
1143 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1144                                         bool fsuid_only)
1145 {
1146         struct cifsFileInfo *open_file = NULL;
1147         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1148
1149         /* only filter by fsuid on multiuser mounts */
1150         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1151                 fsuid_only = false;
1152
1153         spin_lock(&cifs_file_list_lock);
1154         /* we could simply get the first_list_entry since write-only entries
1155            are always at the end of the list but since the first entry might
1156            have a close pending, we go through the whole list */
1157         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1158                 if (fsuid_only && open_file->uid != current_fsuid())
1159                         continue;
1160                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1161                         if (!open_file->invalidHandle) {
1162                                 /* found a good file */
1163                                 /* lock it so it will not be closed on us */
1164                                 cifsFileInfo_get(open_file);
1165                                 spin_unlock(&cifs_file_list_lock);
1166                                 return open_file;
1167                         } /* else might as well continue, and look for
1168                              another, or simply have the caller reopen it
1169                              again rather than trying to fix this handle */
1170                 } else /* write only file */
1171                         break; /* write only files are last so must be done */
1172         }
1173         spin_unlock(&cifs_file_list_lock);
1174         return NULL;
1175 }
1176 #endif
1177
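/* Find an open instance of this inode with write access, preferring one
   opened by the current task; invalid handles are reopened if possible */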
1178 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1179                                         bool fsuid_only)
1180 {
1181         struct cifsFileInfo *open_file;
1182         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1183         bool any_available = false;
1184         int rc;
1185
1186         /* Having a null inode here (because mapping->host was set to zero by
1187         the VFS or MM) should not happen, but we had reports of an oops (due to
1188         it being zero) during stress testcases so we need to check for it */
1189
1190         if (cifs_inode == NULL) {
1191                 cERROR(1, "Null inode passed to cifs_writeable_file");
1192                 dump_stack();
1193                 return NULL;
1194         }
1195
1196         /* only filter by fsuid on multiuser mounts */
1197         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1198                 fsuid_only = false;
1199
1200         spin_lock(&cifs_file_list_lock);
1201 refind_writable:
1202         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1203                 if (!any_available && open_file->pid != current->tgid)
1204                         continue;
1205                 if (fsuid_only && open_file->uid != current_fsuid())
1206                         continue;
1207                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1208                         cifsFileInfo_get(open_file);
1209
1210                         if (!open_file->invalidHandle) {
1211                                 /* found a good writable file */
1212                                 spin_unlock(&cifs_file_list_lock);
1213                                 return open_file;
1214                         }
1215
1216                         spin_unlock(&cifs_file_list_lock);
1217
1218                         /* Had to unlock since following call can block */
1219                         rc = cifs_reopen_file(open_file, false);
1220                         if (!rc)
1221                                 return open_file;
1222
1223                         /* if it fails, try another handle if possible */
1224                         cFYI(1, "wp failed on reopen file");
1225                         cifsFileInfo_put(open_file);
1226
1227                         spin_lock(&cifs_file_list_lock);
1228
1229                         /* else we simply continue to the next entry. Thus
1230                            we do not loop on reopen errors.  If we
1231                            can not reopen the file, for example if we
1232                            reconnected to a server with another client
1233                            racing to delete or lock the file we would not
1234                            make progress if we restarted before the beginning
1235                            of the loop here. */
1236                 }
1237         }
1238         /* couldn't find useable FH with same pid, try any available */
1239         if (!any_available) {
1240                 any_available = true;
1241                 goto refind_writable;
1242         }
1243         spin_unlock(&cifs_file_list_lock);
1244         return NULL;
1245 }
1246
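/* Write the dirty [from, to) range of a single page back to the server
   using any writable handle that is open against the inode */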
1247 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1248 {
1249         struct address_space *mapping = page->mapping;
1250         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1251         char *write_data;
1252         int rc = -EFAULT;
1253         int bytes_written = 0;
1254         struct cifs_sb_info *cifs_sb;
1255         struct inode *inode;
1256         struct cifsFileInfo *open_file;
1257
1258         if (!mapping || !mapping->host)
1259                 return -EFAULT;
1260
1261         inode = page->mapping->host;
1262         cifs_sb = CIFS_SB(inode->i_sb);
1263
1264         offset += (loff_t)from;
1265         write_data = kmap(page);
1266         write_data += from;
1267
1268         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1269                 kunmap(page);
1270                 return -EIO;
1271         }
1272
1273         /* racing with truncate? */
1274         if (offset > mapping->host->i_size) {
1275                 kunmap(page);
1276                 return 0; /* don't care */
1277         }
1278
1279         /* check to make sure that we are not extending the file */
1280         if (mapping->host->i_size - offset < (loff_t)to)
1281                 to = (unsigned)(mapping->host->i_size - offset);
1282
1283         open_file = find_writable_file(CIFS_I(mapping->host), false);
1284         if (open_file) {
1285                 bytes_written = cifs_write(open_file, write_data,
1286                                            to - from, &offset);
1287                 cifsFileInfo_put(open_file);
1288                 /* Does mm or vfs already set times? */
1289                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1290                 if ((bytes_written > 0) && (offset))
1291                         rc = 0;
1292                 else if (bytes_written < 0)
1293                         rc = bytes_written;
1294         } else {
1295                 cFYI(1, "No writeable filehandles for inode");
1296                 rc = -EIO;
1297         }
1298
1299         kunmap(page);
1300         return rc;
1301 }
1302
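/* ->writepages(): batch runs of dirty pages into wsize-sized SMB writes,
   falling back to generic_writepages() (one page at a time) when the wsize,
   signing or open-file situation does not allow that */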
1303 static int cifs_writepages(struct address_space *mapping,
1304                            struct writeback_control *wbc)
1305 {
1306         unsigned int bytes_to_write;
1307         unsigned int bytes_written;
1308         struct cifs_sb_info *cifs_sb;
1309         int done = 0;
1310         pgoff_t end;
1311         pgoff_t index;
1312         int range_whole = 0;
1313         struct kvec *iov;
1314         int len;
1315         int n_iov = 0;
1316         pgoff_t next;
1317         int nr_pages;
1318         __u64 offset = 0;
1319         struct cifsFileInfo *open_file;
1320         struct cifsTconInfo *tcon;
1321         struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
1322         struct page *page;
1323         struct pagevec pvec;
1324         int rc = 0;
1325         int scanned = 0;
1326         int xid, long_op;
1327
1328         cifs_sb = CIFS_SB(mapping->host->i_sb);
1329
1330         /*
1331          * If wsize is smaller than the page cache size, default to writing
1332          * one page at a time via cifs_writepage()
1333          */
1334         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1335                 return generic_writepages(mapping, wbc);
1336
1337         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1338         if (iov == NULL)
1339                 return generic_writepages(mapping, wbc);
1340
1341         /*
1342          * If there's no open file, writing back is likely to fail as well,
1343          * but fall back to generic_writepages() so the error is at least
1344          * handled.  Maybe this should be a BUG() instead?
1345          */
1346         open_file = find_writable_file(CIFS_I(mapping->host), false);
1347         if (!open_file) {
1348                 kfree(iov);
1349                 return generic_writepages(mapping, wbc);
1350         }
1351
1352         tcon = tlink_tcon(open_file->tlink);
1353         if (!experimEnabled && tcon->ses->server->secMode &
1354                         (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1355                 cifsFileInfo_put(open_file);
1356                 return generic_writepages(mapping, wbc);
1357         }
1358         cifsFileInfo_put(open_file);
1359
1360         xid = GetXid();
1361
1362         pagevec_init(&pvec, 0);
1363         if (wbc->range_cyclic) {
1364                 index = mapping->writeback_index; /* Start from prev offset */
1365                 end = -1;
1366         } else {
1367                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1368                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1369                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1370                         range_whole = 1;
1371                 scanned = 1;
1372         }
1373 retry:
1374         while (!done && (index <= end) &&
1375                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1376                         PAGECACHE_TAG_DIRTY,
1377                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1378                 int first;
1379                 unsigned int i;
1380
1381                 first = -1;
1382                 next = 0;
1383                 n_iov = 0;
1384                 bytes_to_write = 0;
1385
1386                 for (i = 0; i < nr_pages; i++) {
1387                         page = pvec.pages[i];
1388                         /*
1389                          * At this point we hold neither mapping->tree_lock nor
1390                          * lock on the page itself: the page may be truncated or
1391                          * invalidated (changing page->mapping to NULL), or even
1392                          * swizzled back from swapper_space to tmpfs file
1393                          * mapping
1394                          */
1395
1396                         if (first < 0)
1397                                 lock_page(page);
1398                         else if (!trylock_page(page))
1399                                 break;
1400
1401                         if (unlikely(page->mapping != mapping)) {
1402                                 unlock_page(page);
1403                                 break;
1404                         }
1405
1406                         if (!wbc->range_cyclic && page->index > end) {
1407                                 done = 1;
1408                                 unlock_page(page);
1409                                 break;
1410                         }
1411
1412                         if (next && (page->index != next)) {
1413                                 /* Not next consecutive page */
1414                                 unlock_page(page);
1415                                 break;
1416                         }
1417
1418                         if (wbc->sync_mode != WB_SYNC_NONE)
1419                                 wait_on_page_writeback(page);
1420
1421                         if (PageWriteback(page) ||
1422                                         !clear_page_dirty_for_io(page)) {
1423                                 unlock_page(page);
1424                                 break;
1425                         }
1426
1427                         /*
1428                          * This actually clears the dirty bit in the radix tree.
1429                          * See cifs_writepage() for more commentary.
1430                          */
1431                         set_page_writeback(page);
1432
1433                         if (page_offset(page) >= mapping->host->i_size) {
1434                                 done = 1;
1435                                 unlock_page(page);
1436                                 end_page_writeback(page);
1437                                 break;
1438                         }
1439
1440                         /*
1441                          * BB can we get rid of this?  pages are held by pvec
1442                          */
1443                         page_cache_get(page);
1444
1445                         len = min(mapping->host->i_size - page_offset(page),
1446                                   (loff_t)PAGE_CACHE_SIZE);
1447
1448                         /* reserve iov[0] for the smb header */
1449                         n_iov++;
1450                         iov[n_iov].iov_base = kmap(page);
1451                         iov[n_iov].iov_len = len;
1452                         bytes_to_write += len;
1453
1454                         if (first < 0) {
1455                                 first = i;
1456                                 offset = page_offset(page);
1457                         }
1458                         next = page->index + 1;
1459                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1460                                 break;
1461                 }
1462                 if (n_iov) {
1463                         open_file = find_writable_file(CIFS_I(mapping->host),
1464                                                         false);
1465                         if (!open_file) {
1466                                 cERROR(1, "No writable handles for inode");
1467                                 rc = -EBADF;
1468                         } else {
1469                                 long_op = cifs_write_timeout(cifsi, offset);
1470                                 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
1471                                                    bytes_to_write, offset,
1472                                                    &bytes_written, iov, n_iov,
1473                                                    long_op);
1474                                 cifsFileInfo_put(open_file);
1475                                 cifs_update_eof(cifsi, offset, bytes_written);
1476                         }
1477
1478                         if (rc || bytes_written < bytes_to_write) {
1479                                 cERROR(1, "Write2 ret %d, wrote %d",
1480                                           rc, bytes_written);
1481                                 /* BB what if continued retry is
1482                                    requested via mount flags? */
1483                                 if (rc == -ENOSPC)
1484                                         set_bit(AS_ENOSPC, &mapping->flags);
1485                                 else
1486                                         set_bit(AS_EIO, &mapping->flags);
1487                         } else {
1488                                 cifs_stats_bytes_written(tcon, bytes_written);
1489                         }
1490
1491                         for (i = 0; i < n_iov; i++) {
1492                                 page = pvec.pages[first + i];
1493                                 /* Should we also set the page error when
1494                                 rc is success but too little data was
1495                                 written? */
1496                                 /* BB investigate retry logic for temporary
1497                                 server crashes and how recovery works once a
1498                                 page has been marked as in error */
1498                                 if (rc)
1499                                         SetPageError(page);
1500                                 kunmap(page);
1501                                 unlock_page(page);
1502                                 end_page_writeback(page);
1503                                 page_cache_release(page);
1504                         }
1505                         if ((wbc->nr_to_write -= n_iov) <= 0)
1506                                 done = 1;
1507                         index = next;
1508                 } else
1509                         /* Need to re-find the pages we skipped */
1510                         index = pvec.pages[0]->index + 1;
1511
1512                 pagevec_release(&pvec);
1513         }
1514         if (!scanned && !done) {
1515                 /*
1516                  * We hit the last page and there is more work to be done: wrap
1517                  * back to the start of the file
1518                  */
1519                 scanned = 1;
1520                 index = 0;
1521                 goto retry;
1522         }
1523         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1524                 mapping->writeback_index = index;
1525
1526         FreeXid(xid);
1527         kfree(iov);
1528         return rc;
1529 }
1530
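/*
 * Write a single dirty page back to the server via cifs_partialpagewrite().
 * The page is put under writeback before the write is issued, and the
 * writeback is ended once the data has been sent.
 */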
1531 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1532 {
1533         int rc = -EFAULT;
1534         int xid;
1535
1536         xid = GetXid();
1537 /* BB add check for wbc flags */
1538         page_cache_get(page);
1539         if (!PageUptodate(page))
1540                 cFYI(1, "ppw - page not up to date");
1541
1542         /*
1543          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1544          *
1545          * A writepage() implementation always needs to do either this,
1546          * or re-dirty the page with "redirty_page_for_writepage()" in
1547          * the case of a failure.
1548          *
1549          * Just unlocking the page would leave the radix tree tag-bits
1550          * out of sync with the actual state of the page.
1551          */
1552         set_page_writeback(page);
1553         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1554         SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1555         unlock_page(page);
1556         end_page_writeback(page);
1557         page_cache_release(page);
1558         FreeXid(xid);
1559         return rc;
1560 }
1561
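/*
 * Complete a buffered write started by cifs_write_begin().  If the page is
 * up to date, the copied data is simply marked dirty for later writeback;
 * otherwise the new bytes are written to the server synchronously via
 * cifs_write().  The inode size is extended when the write goes past EOF.
 */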
1562 static int cifs_write_end(struct file *file, struct address_space *mapping,
1563                         loff_t pos, unsigned len, unsigned copied,
1564                         struct page *page, void *fsdata)
1565 {
1566         int rc;
1567         struct inode *inode = mapping->host;
1568
1569         cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1570                  page, pos, copied);
1571
1572         if (PageChecked(page)) {
1573                 if (copied == len)
1574                         SetPageUptodate(page);
1575                 ClearPageChecked(page);
1576         } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1577                 SetPageUptodate(page);
1578
1579         if (!PageUptodate(page)) {
1580                 char *page_data;
1581                 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1582                 int xid;
1583
1584                 xid = GetXid();
1585                 /* this is probably better than calling
1586                    cifs_partialpagewrite() directly, since here the file
1587                    handle is already known and we might as well use it */
1588                 /* BB check whether anything else from ppw is missing here,
1589                    such as updating the last write time */
1590                 page_data = kmap(page);
1591                 rc = cifs_write(file->private_data, page_data + offset,
1592                                 copied, &pos);
1593                 /* if (rc < 0) should we set writebehind rc? */
1594                 kunmap(page);
1595
1596                 FreeXid(xid);
1597         } else {
1598                 rc = copied;
1599                 pos += copied;
1600                 set_page_dirty(page);
1601         }
1602
1603         if (rc > 0) {
1604                 spin_lock(&inode->i_lock);
1605                 if (pos > inode->i_size)
1606                         i_size_write(inode, pos);
1607                 spin_unlock(&inode->i_lock);
1608         }
1609
1610         unlock_page(page);
1611         page_cache_release(page);
1612
1613         return rc;
1614 }
1615
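/*
 * Flush dirty pages for the inode and, unless CIFS_MOUNT_NOSSYNC is set,
 * ask the server to flush the file handle via CIFSSMBFlush().  If the
 * local flush succeeds, any stored write-behind error is returned to the
 * caller instead and then cleared.
 */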
1616 int cifs_fsync(struct file *file, int datasync)
1617 {
1618         int xid;
1619         int rc = 0;
1620         struct cifsTconInfo *tcon;
1621         struct cifsFileInfo *smbfile = file->private_data;
1622         struct inode *inode = file->f_path.dentry->d_inode;
1623
1624         xid = GetXid();
1625
1626         cFYI(1, "Sync file - name: %s datasync: 0x%x",
1627                 file->f_path.dentry->d_name.name, datasync);
1628
1629         rc = filemap_write_and_wait(inode->i_mapping);
1630         if (rc == 0) {
1631                 rc = CIFS_I(inode)->write_behind_rc;
1632                 CIFS_I(inode)->write_behind_rc = 0;
1633                 tcon = tlink_tcon(smbfile->tlink);
1634                 if (!rc && tcon && smbfile &&
1635                    !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1636                         rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1637         }
1638
1639         FreeXid(xid);
1640         return rc;
1641 }
1642
1643 /* static void cifs_sync_page(struct page *page)
1644 {
1645         struct address_space *mapping;
1646         struct inode *inode;
1647         unsigned long index = page->index;
1648         unsigned int rpages = 0;
1649         int rc = 0;
1650
1651         cFYI(1, "sync page %p", page);
1652         mapping = page->mapping;
1653         if (!mapping)
1654                 return 0;
1655         inode = mapping->host;
1656         if (!inode)
1657                 return; */
1658
1659 /*      fill in rpages then
1660         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1661
1662 /*      cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1663
1664 #if 0
1665         if (rc < 0)
1666                 return rc;
1667         return 0;
1668 #endif
1669 } */
1670
1671 /*
1672  * As the file closes, flush all cached write data for this inode,
1673  * checking for write-behind errors.
1674  */
1675 int cifs_flush(struct file *file, fl_owner_t id)
1676 {
1677         struct inode *inode = file->f_path.dentry->d_inode;
1678         int rc = 0;
1679
1680         /* Rather than do the steps manually:
1681            lock the inode for writing
1682            loop through pages looking for write behind data (dirty pages)
1683            coalesce into contiguous 16K (or smaller) chunks to write to server
1684            send to server (prefer in parallel)
1685            deal with writebehind errors
1686            unlock inode for writing
1687            filemap_fdatawrite() appears easier for the time being */
1688
1689         rc = filemap_fdatawrite(inode->i_mapping);
1690         /* reset wb rc if we were able to write out dirty pages */
1691         if (!rc) {
1692                 rc = CIFS_I(inode)->write_behind_rc;
1693                 CIFS_I(inode)->write_behind_rc = 0;
1694         }
1695
1696         cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1697
1698         return rc;
1699 }
1700
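/*
 * Read up to @read_size bytes at *@poffset into a user buffer, issuing
 * CIFSSMBRead requests of at most rsize bytes and copying each response
 * out of the SMB buffer with copy_to_user().  Invalid handles are
 * reopened and -EAGAIN responses are retried.  Returns the number of
 * bytes read, or a negative error if nothing could be read.
 */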
1701 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1702         size_t read_size, loff_t *poffset)
1703 {
1704         int rc = -EACCES;
1705         unsigned int bytes_read = 0;
1706         unsigned int total_read = 0;
1707         unsigned int current_read_size;
1708         struct cifs_sb_info *cifs_sb;
1709         struct cifsTconInfo *pTcon;
1710         int xid;
1711         struct cifsFileInfo *open_file;
1712         char *smb_read_data;
1713         char __user *current_offset;
1714         struct smb_com_read_rsp *pSMBr;
1715
1716         xid = GetXid();
1717         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1718
1719         if (file->private_data == NULL) {
1720                 rc = -EBADF;
1721                 FreeXid(xid);
1722                 return rc;
1723         }
1724         open_file = file->private_data;
1725         pTcon = tlink_tcon(open_file->tlink);
1726
1727         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1728                 cFYI(1, "attempting read on write only file instance");
1729
1730         for (total_read = 0, current_offset = read_data;
1731              read_size > total_read;
1732              total_read += bytes_read, current_offset += bytes_read) {
1733                 current_read_size = min_t(const int, read_size - total_read,
1734                                           cifs_sb->rsize);
1735                 rc = -EAGAIN;
1736                 smb_read_data = NULL;
1737                 while (rc == -EAGAIN) {
1738                         int buf_type = CIFS_NO_BUFFER;
1739                         if (open_file->invalidHandle) {
1740                                 rc = cifs_reopen_file(open_file, true);
1741                                 if (rc != 0)
1742                                         break;
1743                         }
1744                         rc = CIFSSMBRead(xid, pTcon,
1745                                          open_file->netfid,
1746                                          current_read_size, *poffset,
1747                                          &bytes_read, &smb_read_data,
1748                                          &buf_type);
1749                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1750                         if (smb_read_data) {
1751                                 if (copy_to_user(current_offset,
1752                                                 smb_read_data +
1753                                                 4 /* RFC1001 length field */ +
1754                                                 le16_to_cpu(pSMBr->DataOffset),
1755                                                 bytes_read))
1756                                         rc = -EFAULT;
1757
1758                                 if (buf_type == CIFS_SMALL_BUFFER)
1759                                         cifs_small_buf_release(smb_read_data);
1760                                 else if (buf_type == CIFS_LARGE_BUFFER)
1761                                         cifs_buf_release(smb_read_data);
1762                                 smb_read_data = NULL;
1763                         }
1764                 }
1765                 if (rc || (bytes_read == 0)) {
1766                         if (total_read) {
1767                                 break;
1768                         } else {
1769                                 FreeXid(xid);
1770                                 return rc;
1771                         }
1772                 } else {
1773                         cifs_stats_bytes_read(pTcon, bytes_read);
1774                         *poffset += bytes_read;
1775                 }
1776         }
1777         FreeXid(xid);
1778         return total_read;
1779 }
1780
1781
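/*
 * Kernel-buffer variant of the read path: the response data is placed
 * directly into @read_data, so no separate copy step is needed.  For
 * sessions without CAP_LARGE_FILES the request size is additionally
 * capped at the server's maxBuf (less header room).  Returns the number
 * of bytes read, or a negative error if nothing could be read.
 */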
1782 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1783         loff_t *poffset)
1784 {
1785         int rc = -EACCES;
1786         unsigned int bytes_read = 0;
1787         unsigned int total_read;
1788         unsigned int current_read_size;
1789         struct cifs_sb_info *cifs_sb;
1790         struct cifsTconInfo *pTcon;
1791         int xid;
1792         char *current_offset;
1793         struct cifsFileInfo *open_file;
1794         int buf_type = CIFS_NO_BUFFER;
1795
1796         xid = GetXid();
1797         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1798
1799         if (file->private_data == NULL) {
1800                 rc = -EBADF;
1801                 FreeXid(xid);
1802                 return rc;
1803         }
1804         open_file = file->private_data;
1805         pTcon = tlink_tcon(open_file->tlink);
1806
1807         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1808                 cFYI(1, "attempting read on write only file instance");
1809
1810         for (total_read = 0, current_offset = read_data;
1811              read_size > total_read;
1812              total_read += bytes_read, current_offset += bytes_read) {
1813                 current_read_size = min_t(const int, read_size - total_read,
1814                                           cifs_sb->rsize);
1815                 /* For Windows ME and 9x we do not want to request more
1816                 than the server negotiated, since it will then refuse the read */
1817                 if ((pTcon->ses) &&
1818                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1819                         current_read_size = min_t(const int, current_read_size,
1820                                         pTcon->ses->server->maxBuf - 128);
1821                 }
1822                 rc = -EAGAIN;
1823                 while (rc == -EAGAIN) {
1824                         if (open_file->invalidHandle) {
1825                                 rc = cifs_reopen_file(open_file, true);
1826                                 if (rc != 0)
1827                                         break;
1828                         }
1829                         rc = CIFSSMBRead(xid, pTcon,
1830                                          open_file->netfid,
1831                                          current_read_size, *poffset,
1832                                          &bytes_read, &current_offset,
1833                                          &buf_type);
1834                 }
1835                 if (rc || (bytes_read == 0)) {
1836                         if (total_read) {
1837                                 break;
1838                         } else {
1839                                 FreeXid(xid);
1840                                 return rc;
1841                         }
1842                 } else {
1843                         cifs_stats_bytes_read(pTcon, bytes_read);
1844                         *poffset += bytes_read;
1845                 }
1846         }
1847         FreeXid(xid);
1848         return total_read;
1849 }
1850
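/*
 * Revalidate cached data for the file before handing the mapping off to
 * generic_file_mmap().
 */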
1851 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1852 {
1853         int rc, xid;
1854
1855         xid = GetXid();
1856         rc = cifs_revalidate_file(file);
1857         if (rc) {
1858                 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1859                 FreeXid(xid);
1860                 return rc;
1861         }
1862         rc = generic_file_mmap(file, vma);
1863         FreeXid(xid);
1864         return rc;
1865 }
1866
1867
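/*
 * Copy @bytes_read bytes of SMB response data into the pages queued on
 * @pages, adding each page to the page cache and LRU.  The tail of a
 * final partial page is zero-filled, each completed page is marked up to
 * date and unlocked, and the page is pushed into FS-Cache.  If a page
 * cannot be added to the page cache, its chunk of data is skipped.
 */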
1868 static void cifs_copy_cache_pages(struct address_space *mapping,
1869         struct list_head *pages, int bytes_read, char *data)
1870 {
1871         struct page *page;
1872         char *target;
1873
1874         while (bytes_read > 0) {
1875                 if (list_empty(pages))
1876                         break;
1877
1878                 page = list_entry(pages->prev, struct page, lru);
1879                 list_del(&page->lru);
1880
1881                 if (add_to_page_cache_lru(page, mapping, page->index,
1882                                       GFP_KERNEL)) {
1883                         page_cache_release(page);
1884                         cFYI(1, "Add page cache failed");
1885                         data += PAGE_CACHE_SIZE;
1886                         bytes_read -= PAGE_CACHE_SIZE;
1887                         continue;
1888                 }
1889                 page_cache_release(page);
1890
1891                 target = kmap_atomic(page, KM_USER0);
1892
1893                 if (PAGE_CACHE_SIZE > bytes_read) {
1894                         memcpy(target, data, bytes_read);
1895                         /* zero the tail end of this partial page */
1896                         memset(target + bytes_read, 0,
1897                                PAGE_CACHE_SIZE - bytes_read);
1898                         bytes_read = 0;
1899                 } else {
1900                         memcpy(target, data, PAGE_CACHE_SIZE);
1901                         bytes_read -= PAGE_CACHE_SIZE;
1902                 }
1903                 kunmap_atomic(target, KM_USER0);
1904
1905                 flush_dcache_page(page);
1906                 SetPageUptodate(page);
1907                 unlock_page(page);
1908                 data += PAGE_CACHE_SIZE;
1909
1910                 /* add page to FS-Cache */
1911                 cifs_readpage_to_fscache(mapping->host, page);
1912         }
1913         return;
1914 }
1915
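/*
 * Readahead entry point.  Pages that FS-Cache can satisfy are read from
 * the cache first; the rest are read from the server in runs of
 * contiguous pages, with each request capped at rsize rounded down to a
 * whole number of pages, and then copied into the page cache by
 * cifs_copy_cache_pages().
 */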
1916 static int cifs_readpages(struct file *file, struct address_space *mapping,
1917         struct list_head *page_list, unsigned num_pages)
1918 {
1919         int rc = -EACCES;
1920         int xid;
1921         loff_t offset;
1922         struct page *page;
1923         struct cifs_sb_info *cifs_sb;
1924         struct cifsTconInfo *pTcon;
1925         unsigned int bytes_read = 0;
1926         unsigned int read_size, i;
1927         char *smb_read_data = NULL;
1928         struct smb_com_read_rsp *pSMBr;
1929         struct cifsFileInfo *open_file;
1930         int buf_type = CIFS_NO_BUFFER;
1931
1932         xid = GetXid();
1933         if (file->private_data == NULL) {
1934                 rc = -EBADF;
1935                 FreeXid(xid);
1936                 return rc;
1937         }
1938         open_file = file->private_data;
1939         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1940         pTcon = tlink_tcon(open_file->tlink);
1941
1942         /*
1943          * Reads as many pages as possible from fscache. Returns -ENOBUFS
1944          * immediately if the cookie is negative
1945          */
1946         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
1947                                          &num_pages);
1948         if (rc == 0)
1949                 goto read_complete;
1950
1951         cFYI(DBG2, "rpages: num pages %d", num_pages);
1952         for (i = 0; i < num_pages; ) {
1953                 unsigned contig_pages;
1954                 struct page *tmp_page;
1955                 unsigned long expected_index;
1956
1957                 if (list_empty(page_list))
1958                         break;
1959
1960                 page = list_entry(page_list->prev, struct page, lru);
1961                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1962
1963                 /* count adjacent pages that we will read into */
1964                 contig_pages = 0;
1965                 expected_index =
1966                         list_entry(page_list->prev, struct page, lru)->index;
1967                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1968                         if (tmp_page->index == expected_index) {
1969                                 contig_pages++;
1970                                 expected_index++;
1971                         } else
1972                                 break;
1973                 }
1974                 if (contig_pages + i >  num_pages)
1975                         contig_pages = num_pages - i;
1976
1977                 /* for reads over a certain size could initiate async
1978                    read ahead */
1979
1980                 read_size = contig_pages * PAGE_CACHE_SIZE;
1981                 /* Read size needs to be a multiple of the page size */
1982                 read_size = min_t(const unsigned int, read_size,
1983                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1984                 cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
1985                                 read_size, contig_pages);
1986                 rc = -EAGAIN;
1987                 while (rc == -EAGAIN) {
1988                         if (open_file->invalidHandle) {
1989                                 rc = cifs_reopen_file(open_file, true);
1990                                 if (rc != 0)
1991                                         break;
1992                         }
1993
1994                         rc = CIFSSMBRead(xid, pTcon,
1995                                          open_file->netfid,
1996                                          read_size, offset,
1997                                          &bytes_read, &smb_read_data,
1998                                          &buf_type);
1999                         /* BB more RC checks ? */
2000                         if (rc == -EAGAIN) {
2001                                 if (smb_read_data) {
2002                                         if (buf_type == CIFS_SMALL_BUFFER)
2003                                                 cifs_small_buf_release(smb_read_data);
2004                                         else if (buf_type == CIFS_LARGE_BUFFER)
2005                                                 cifs_buf_release(smb_read_data);
2006                                         smb_read_data = NULL;
2007                                 }
2008                         }
2009                 }
2010                 if ((rc < 0) || (smb_read_data == NULL)) {
2011                         cFYI(1, "Read error in readpages: %d", rc);
2012                         break;
2013                 } else if (bytes_read > 0) {
2014                         task_io_account_read(bytes_read);
2015                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2016                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
2017                                 smb_read_data + 4 /* RFC1001 hdr */ +
2018                                 le16_to_cpu(pSMBr->DataOffset));
2019
2020                         i +=  bytes_read >> PAGE_CACHE_SHIFT;
2021                         cifs_stats_bytes_read(pTcon, bytes_read);
2022                         if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
2023                                 i++; /* account for partial page */
2024
2025                                 /* server copy of file can have smaller size
2026                                    than client */
2027                                 /* BB do we need to verify this common case ?
2028                                    this case is ok - if we are at server EOF
2029                                    we will hit it on next read */
2030
2031                                 /* break; */
2032                         }
2033                 } else {
2034                         cFYI(1, "No bytes read (%d) at offset %lld. "
2035                                 "Cleaning remaining pages from readahead list",
2036                                 bytes_read, offset);
2037                         /* BB turn off caching and do new lookup on
2038                            file size at server? */
2039                         break;
2040                 }
2041                 if (smb_read_data) {
2042                         if (buf_type == CIFS_SMALL_BUFFER)
2043                                 cifs_small_buf_release(smb_read_data);
2044                         else if (buf_type == CIFS_LARGE_BUFFER)
2045                                 cifs_buf_release(smb_read_data);
2046                         smb_read_data = NULL;
2047                 }
2048                 bytes_read = 0;
2049         }
2050
2051 /* need to free smb_read_data buf before exit */
2052         if (smb_read_data) {
2053                 if (buf_type == CIFS_SMALL_BUFFER)
2054                         cifs_small_buf_release(smb_read_data);
2055                 else if (buf_type == CIFS_LARGE_BUFFER)
2056                         cifs_buf_release(smb_read_data);
2057                 smb_read_data = NULL;
2058         }
2059
2060 read_complete:
2061         FreeXid(xid);
2062         return rc;
2063 }
2064
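/*
 * Fill a single page, preferring FS-Cache and falling back to a
 * synchronous cifs_read() from the server.  The tail of a short read is
 * zero-filled, i_atime is updated, and the page is marked up to date and
 * pushed to FS-Cache.
 */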
2065 static int cifs_readpage_worker(struct file *file, struct page *page,
2066         loff_t *poffset)
2067 {
2068         char *read_data;
2069         int rc;
2070
2071         /* Is the page cached? */
2072         rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2073         if (rc == 0)
2074                 goto read_complete;
2075
2076         page_cache_get(page);
2077         read_data = kmap(page);
2078         /* for reads over a certain size could initiate async read ahead */
2079
2080         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2081
2082         if (rc < 0)
2083                 goto io_error;
2084         else
2085                 cFYI(1, "Bytes read %d", rc);
2086
2087         file->f_path.dentry->d_inode->i_atime =
2088                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
2089
2090         if (PAGE_CACHE_SIZE > rc)
2091                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2092
2093         flush_dcache_page(page);
2094         SetPageUptodate(page);
2095
2096         /* send this page to the cache */
2097         cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2098
2099         rc = 0;
2100
2101 io_error:
2102         kunmap(page);
2103         page_cache_release(page);
2104
2105 read_complete:
2106         return rc;
2107 }
2108
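/*
 * ->readpage implementation: fill the page via cifs_readpage_worker()
 * and unlock it before returning.
 */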
2109 static int cifs_readpage(struct file *file, struct page *page)
2110 {
2111         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2112         int rc = -EACCES;
2113         int xid;
2114
2115         xid = GetXid();
2116
2117         if (file->private_data == NULL) {
2118                 rc = -EBADF;
2119                 FreeXid(xid);
2120                 return rc;
2121         }
2122
2123         cFYI(1, "readpage %p at offset %d 0x%x\n",
2124                  page, (int)offset, (int)offset);
2125
2126         rc = cifs_readpage_worker(file, page, &offset);
2127
2128         unlock_page(page);
2129
2130         FreeXid(xid);
2131         return rc;
2132 }
2133
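/*
 * Return 1 if any open handle on this inode was opened with write access,
 * 0 otherwise.
 */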
2134 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2135 {
2136         struct cifsFileInfo *open_file;
2137
2138         spin_lock(&cifs_file_list_lock);
2139         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2140                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2141                         spin_unlock(&cifs_file_list_lock);
2142                         return 1;
2143                 }
2144         }
2145         spin_unlock(&cifs_file_list_lock);
2146         return 0;
2147 }
2148
2149 /* We do not want to update the file size from the server for inodes
2150    that are open for write, to avoid races with writepage extending
2151    the file.  In the future we could consider refreshing the inode
2152    only when the file size increases, but that is tricky to do without
2153    racing with write-behind page caching in the current Linux kernel
2154    design. */
2155 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2156 {
2157         if (!cifsInode)
2158                 return true;
2159
2160         if (is_inode_writable(cifsInode)) {
2161                 /* This inode is open for write at least once */
2162                 struct cifs_sb_info *cifs_sb;
2163
2164                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2165                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2166                         /* since there is no page cache to corrupt with
2167                         direct I/O, we can change the size safely */
2168                         return true;
2169                 }
2170
2171                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2172                         return true;
2173
2174                 return false;
2175         } else
2176                 return true;
2177 }
2178
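/*
 * Prepare a page for a buffered write.  Whole-page writes, already
 * up-to-date pages, and pages that can be served under a read oplock
 * (because the page lies beyond EOF, or the write starts at the page
 * boundary and covers everything up to EOF) skip the read from the
 * server; otherwise the existing contents are read in so that
 * cifs_write_end() can simply dirty the page.
 */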
2179 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2180                         loff_t pos, unsigned len, unsigned flags,
2181                         struct page **pagep, void **fsdata)
2182 {
2183         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2184         loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2185         loff_t page_start = pos & PAGE_MASK;
2186         loff_t i_size;
2187         struct page *page;
2188         int rc = 0;
2189
2190         cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2191
2192         page = grab_cache_page_write_begin(mapping, index, flags);
2193         if (!page) {
2194                 rc = -ENOMEM;
2195                 goto out;
2196         }
2197
2198         if (PageUptodate(page))
2199                 goto out;
2200
2201         /*
2202          * If we write a full page it will be up to date, no need to read from
2203          * the server. If the write is short, we'll end up doing a sync write
2204          * instead.
2205          */
2206         if (len == PAGE_CACHE_SIZE)
2207                 goto out;
2208
2209         /*
2210          * optimize away the read when we have an oplock, and we're not
2211          * expecting to use any of the data we'd be reading in. That
2212          * is, when the page lies beyond the EOF, or straddles the EOF
2213          * and the write will cover all of the existing data.
2214          */
2215         if (CIFS_I(mapping->host)->clientCanCacheRead) {
2216                 i_size = i_size_read(mapping->host);
2217                 if (page_start >= i_size ||
2218                     (offset == 0 && (pos + len) >= i_size)) {
2219                         zero_user_segments(page, 0, offset,
2220                                            offset + len,
2221                                            PAGE_CACHE_SIZE);
2222                         /*
2223                          * PageChecked means that the parts of the page
2224                          * to which we're not writing are considered up
2225                          * to date. Once the data is copied to the
2226                          * page, it can be set uptodate.
2227                          */
2228                         SetPageChecked(page);
2229                         goto out;
2230                 }
2231         }
2232
2233         if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2234                 /*
2235                  * might as well read a page, it is fast enough. If we get
2236                  * an error, we don't need to return it. cifs_write_end will
2237                  * do a sync write instead since PG_uptodate isn't set.
2238                  */
2239                 cifs_readpage_worker(file, page, &page_start);
2240         } else {
2241                 /* we could try using another file handle if there is one,
2242                    but how would we lock it to prevent a close of that handle
2243                    racing with this read?  In any case the data will be
2244                    written out by write_end, so this is fine */
2245         }
2246 out:
2247         *pagep = page;
2248         return rc;
2249 }
2250
2251 static int cifs_release_page(struct page *page, gfp_t gfp)
2252 {
2253         if (PagePrivate(page))
2254                 return 0;
2255
2256         return cifs_fscache_release_page(page, gfp);
2257 }
2258
2259 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2260 {
2261         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2262
2263         if (offset == 0)
2264                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2265 }
2266
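/*
 * Work item run when the server breaks an oplock.  Cached data is
 * flushed (and invalidated when read caching is being lost), any error
 * is stored in write_behind_rc, and an oplock release is sent to the
 * server unless the break was cancelled, before the reference is
 * dropped via cifs_oplock_break_put().
 */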
2267 void cifs_oplock_break(struct work_struct *work)
2268 {
2269         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2270                                                   oplock_break);
2271         struct inode *inode = cfile->dentry->d_inode;
2272         struct cifsInodeInfo *cinode = CIFS_I(inode);
2273         int rc, waitrc = 0;
2274
2275         if (inode && S_ISREG(inode->i_mode)) {
2276                 if (cinode->clientCanCacheRead)
2277                         break_lease(inode, O_RDONLY);
2278                 else
2279                         break_lease(inode, O_WRONLY);
2280                 rc = filemap_fdatawrite(inode->i_mapping);
2281                 if (cinode->clientCanCacheRead == 0) {
2282                         waitrc = filemap_fdatawait(inode->i_mapping);
2283                         invalidate_remote_inode(inode);
2284                 }
2285                 if (!rc)
2286                         rc = waitrc;
2287                 if (rc)
2288                         cinode->write_behind_rc = rc;
2289                 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2290         }
2291
2292         /*
2293          * Releasing a stale oplock after a recent reconnect of the SMB
2294          * session, using a now-incorrect file handle, is not a data
2295          * integrity issue, but do not bother sending an oplock release if
2296          * the session is still disconnected, since the server has already
2297          * released the oplock.
2297          */
2298         if (!cfile->oplock_break_cancelled) {
2299                 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2300                                  0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
2301                 cFYI(1, "Oplock release rc = %d", rc);
2302         }
2303
2304         /*
2305          * We might have kicked in before is_valid_oplock_break()
2306          * finished grabbing a reference for us.  Make sure it is done by
2307          * waiting on cifs_file_list_lock.
2308          */
2309         spin_lock(&cifs_file_list_lock);
2310         spin_unlock(&cifs_file_list_lock);
2311
2312         cifs_oplock_break_put(cfile);
2313 }
2314
2315 void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2316 {
2317         cifs_sb_active(cfile->dentry->d_sb);
2318         cifsFileInfo_get(cfile);
2319 }
2320
2321 void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2322 {
2323         cifsFileInfo_put(cfile);
2324         cifs_sb_deactive(cfile->dentry->d_sb);
2325 }
2326
2327 const struct address_space_operations cifs_addr_ops = {
2328         .readpage = cifs_readpage,
2329         .readpages = cifs_readpages,
2330         .writepage = cifs_writepage,
2331         .writepages = cifs_writepages,
2332         .write_begin = cifs_write_begin,
2333         .write_end = cifs_write_end,
2334         .set_page_dirty = __set_page_dirty_nobuffers,
2335         .releasepage = cifs_release_page,
2336         .invalidatepage = cifs_invalidate_page,
2337         /* .sync_page = cifs_sync_page, */
2338         /* .direct_IO = */
2339 };
2340
2341 /*
2342  * cifs_readpages requires the server to support a buffer large enough to
2343  * contain the header plus one complete page of data.  Otherwise, we need
2344  * to leave cifs_readpages out of the address space operations.
2345  */
2346 const struct address_space_operations cifs_addr_ops_smallbuf = {
2347         .readpage = cifs_readpage,
2348         .writepage = cifs_writepage,
2349         .writepages = cifs_writepages,
2350         .write_begin = cifs_write_begin,
2351         .write_end = cifs_write_end,
2352         .set_page_dirty = __set_page_dirty_nobuffers,
2353         .releasepage = cifs_release_page,
2354         .invalidatepage = cifs_invalidate_page,
2355         /* .sync_page = cifs_sync_page, */
2356         /* .direct_IO = */
2357 };