1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  *
6  *   Copyright (C) International Business Machines  Corp., 2002,2010
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <linux/mount.h>
34 #include <linux/slab.h>
35 #include <asm/div64.h>
36 #include "cifsfs.h"
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_unicode.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "fscache.h"
44
45 static inline int cifs_convert_flags(unsigned int flags)
46 {
47         if ((flags & O_ACCMODE) == O_RDONLY)
48                 return GENERIC_READ;
49         else if ((flags & O_ACCMODE) == O_WRONLY)
50                 return GENERIC_WRITE;
51         else if ((flags & O_ACCMODE) == O_RDWR) {
52         /* GENERIC_ALL is too much permission to request; it can
53            cause unnecessary access denied errors on create */
54                 /* return GENERIC_ALL; */
55                 return (GENERIC_READ | GENERIC_WRITE);
56         }
57
58         return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59                 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60                 FILE_READ_DATA);
61 }
62
63 static u32 cifs_posix_convert_flags(unsigned int flags)
64 {
65         u32 posix_flags = 0;
66
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 posix_flags = SMB_O_RDONLY;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 posix_flags = SMB_O_WRONLY;
71         else if ((flags & O_ACCMODE) == O_RDWR)
72                 posix_flags = SMB_O_RDWR;
73
74         if (flags & O_CREAT)
75                 posix_flags |= SMB_O_CREAT;
76         if (flags & O_EXCL)
77                 posix_flags |= SMB_O_EXCL;
78         if (flags & O_TRUNC)
79                 posix_flags |= SMB_O_TRUNC;
80         /* be safe and imply O_SYNC for O_DSYNC */
81         if (flags & O_DSYNC)
82                 posix_flags |= SMB_O_SYNC;
83         if (flags & O_DIRECTORY)
84                 posix_flags |= SMB_O_DIRECTORY;
85         if (flags & O_NOFOLLOW)
86                 posix_flags |= SMB_O_NOFOLLOW;
87         if (flags & O_DIRECT)
88                 posix_flags |= SMB_O_DIRECT;
89
90         return posix_flags;
91 }
92
93 static inline int cifs_get_disposition(unsigned int flags)
94 {
95         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96                 return FILE_CREATE;
97         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98                 return FILE_OVERWRITE_IF;
99         else if ((flags & O_CREAT) == O_CREAT)
100                 return FILE_OPEN_IF;
101         else if ((flags & O_TRUNC) == O_TRUNC)
102                 return FILE_OVERWRITE;
103         else
104                 return FILE_OPEN;
105 }
106
107 static inline int cifs_open_inode_helper(struct inode *inode,
108         struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
109         char *full_path, int xid)
110 {
111         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
112         struct timespec temp;
113         int rc;
114
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else, so there is
117                    no need to discard cached data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(buf->LastWriteTime);
125         if (timespec_equal(&inode->i_mtime, &temp) &&
126                            (inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, "inode unchanged on server");
129         } else {
130                 if (inode->i_mapping) {
131                         /* BB no need to lock inode until after invalidate
132                         since namei code should already have it locked? */
133                         rc = filemap_write_and_wait(inode->i_mapping);
134                         mapping_set_error(inode->i_mapping, rc);
135                 }
136                 cFYI(1, "invalidating remote inode since open detected it "
137                          "changed");
138                 invalidate_remote_inode(inode);
139         }
140
141 client_can_cache:
142         if (pTcon->unix_ext)
143                 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
144                                               xid);
145         else
146                 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
147                                          xid, NULL);
148
149         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
150                 pCifsInode->clientCanCacheAll = true;
151                 pCifsInode->clientCanCacheRead = true;
152                 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
153         } else if ((oplock & 0xF) == OPLOCK_READ)
154                 pCifsInode->clientCanCacheRead = true;
155
156         return rc;
157 }
158
159 int cifs_posix_open(char *full_path, struct inode **pinode,
160                         struct super_block *sb, int mode, unsigned int f_flags,
161                         __u32 *poplock, __u16 *pnetfid, int xid)
162 {
163         int rc;
164         FILE_UNIX_BASIC_INFO *presp_data;
165         __u32 posix_flags = 0;
166         struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
167         struct cifs_fattr fattr;
168         struct tcon_link *tlink;
169         struct cifsTconInfo *tcon;
170
171         cFYI(1, "posix open %s", full_path);
172
173         presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
174         if (presp_data == NULL)
175                 return -ENOMEM;
176
177         tlink = cifs_sb_tlink(cifs_sb);
178         if (IS_ERR(tlink)) {
179                 rc = PTR_ERR(tlink);
180                 goto posix_open_ret;
181         }
182
183         tcon = tlink_tcon(tlink);
184         mode &= ~current_umask();
185
186         posix_flags = cifs_posix_convert_flags(f_flags);
187         rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
188                              poplock, full_path, cifs_sb->local_nls,
189                              cifs_sb->mnt_cifs_flags &
190                                         CIFS_MOUNT_MAP_SPECIAL_CHR);
191         cifs_put_tlink(tlink);
192
193         if (rc)
194                 goto posix_open_ret;
195
196         if (presp_data->Type == cpu_to_le32(-1))
197                 goto posix_open_ret; /* open ok, caller does qpathinfo */
198
199         if (!pinode)
200                 goto posix_open_ret; /* caller does not need info */
201
202         cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
203
204         /* get new inode and set it up */
205         if (*pinode == NULL) {
206                 cifs_fill_uniqueid(sb, &fattr);
207                 *pinode = cifs_iget(sb, &fattr);
208                 if (!*pinode) {
209                         rc = -ENOMEM;
210                         goto posix_open_ret;
211                 }
212         } else {
213                 cifs_fattr_to_inode(*pinode, &fattr);
214         }
215
216 posix_open_ret:
217         kfree(presp_data);
218         return rc;
219 }
220
221 struct cifsFileInfo *
222 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
223                   struct tcon_link *tlink, __u32 oplock)
224 {
225         struct dentry *dentry = file->f_path.dentry;
226         struct inode *inode = dentry->d_inode;
227         struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
228         struct cifsFileInfo *pCifsFile;
229
230         pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
231         if (pCifsFile == NULL)
232                 return pCifsFile;
233
234         pCifsFile->count = 1;
235         pCifsFile->netfid = fileHandle;
236         pCifsFile->pid = current->tgid;
237         pCifsFile->uid = current_fsuid();
238         pCifsFile->dentry = dget(dentry);
239         pCifsFile->f_flags = file->f_flags;
240         pCifsFile->invalidHandle = false;
241         pCifsFile->tlink = cifs_get_tlink(tlink);
242         mutex_init(&pCifsFile->fh_mutex);
243         mutex_init(&pCifsFile->lock_mutex);
244         INIT_LIST_HEAD(&pCifsFile->llist);
245         INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
246
247         spin_lock(&cifs_file_list_lock);
248         list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
249         /* if a readable file instance, put it first in the list */
250         if (file->f_mode & FMODE_READ)
251                 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
252         else
253                 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
254         spin_unlock(&cifs_file_list_lock);
255
256         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
257                 pCifsInode->clientCanCacheAll = true;
258                 pCifsInode->clientCanCacheRead = true;
259                 cFYI(1, "Exclusive Oplock inode %p", inode);
260         } else if ((oplock & 0xF) == OPLOCK_READ)
261                 pCifsInode->clientCanCacheRead = true;
262
263         file->private_data = pCifsFile;
264         return pCifsFile;
265 }
266
267 /*
268  * Release a reference on the file private data. This may involve closing
269  * the filehandle out on the server. Must be called without holding
270  * cifs_file_list_lock.
271  */
272 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
273 {
274         struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
275         struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
276         struct cifsLockInfo *li, *tmp;
277
278         spin_lock(&cifs_file_list_lock);
279         if (--cifs_file->count > 0) {
280                 spin_unlock(&cifs_file_list_lock);
281                 return;
282         }
283
284         /* remove it from the lists */
285         list_del(&cifs_file->flist);
286         list_del(&cifs_file->tlist);
287
288         if (list_empty(&cifsi->openFileList)) {
289                 cFYI(1, "closing last open instance for inode %p",
290                         cifs_file->dentry->d_inode);
291                 cifsi->clientCanCacheRead = false;
292                 cifsi->clientCanCacheAll  = false;
293         }
294         spin_unlock(&cifs_file_list_lock);
295
296         if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
297                 int xid, rc;
298
299                 xid = GetXid();
300                 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
301                 FreeXid(xid);
302         }
303
304         /* Delete any outstanding lock records. We'll lose them when the file
305          * is closed anyway.
306          */
307         mutex_lock(&cifs_file->lock_mutex);
308         list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
309                 list_del(&li->llist);
310                 kfree(li);
311         }
312         mutex_unlock(&cifs_file->lock_mutex);
313
314         cifs_put_tlink(cifs_file->tlink);
315         dput(cifs_file->dentry);
316         kfree(cifs_file);
317 }
318
319 int cifs_open(struct inode *inode, struct file *file)
320 {
321         int rc = -EACCES;
322         int xid;
323         __u32 oplock;
324         struct cifs_sb_info *cifs_sb;
325         struct cifsTconInfo *tcon;
326         struct tcon_link *tlink;
327         struct cifsFileInfo *pCifsFile = NULL;
328         struct cifsInodeInfo *pCifsInode;
329         char *full_path = NULL;
330         int desiredAccess;
331         int disposition;
332         __u16 netfid;
333         FILE_ALL_INFO *buf = NULL;
334
335         xid = GetXid();
336
337         cifs_sb = CIFS_SB(inode->i_sb);
338         tlink = cifs_sb_tlink(cifs_sb);
339         if (IS_ERR(tlink)) {
340                 FreeXid(xid);
341                 return PTR_ERR(tlink);
342         }
343         tcon = tlink_tcon(tlink);
344
345         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
346
347         full_path = build_path_from_dentry(file->f_path.dentry);
348         if (full_path == NULL) {
349                 rc = -ENOMEM;
350                 goto out;
351         }
352
353         cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
354                  inode, file->f_flags, full_path);
355
356         if (oplockEnabled)
357                 oplock = REQ_OPLOCK;
358         else
359                 oplock = 0;
360
361         if (!tcon->broken_posix_open && tcon->unix_ext &&
362             (tcon->ses->capabilities & CAP_UNIX) &&
363             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
364                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
365                 /* can not refresh inode info since size could be stale */
366                 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
367                                 cifs_sb->mnt_file_mode /* ignored */,
368                                 file->f_flags, &oplock, &netfid, xid);
369                 if (rc == 0) {
370                         cFYI(1, "posix open succeeded");
371
372                         pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
373                                                       oplock);
374                         if (pCifsFile == NULL) {
375                                 CIFSSMBClose(xid, tcon, netfid);
376                                 rc = -ENOMEM;
377                         }
378
379                         cifs_fscache_set_inode_cookie(inode, file);
380
381                         goto out;
382                 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
383                         if (tcon->ses->serverNOS)
384                                 cERROR(1, "server %s of type %s returned"
385                                            " unexpected error on SMB posix open"
386                                            ", disabling posix open support."
387                                            " Check if server update available.",
388                                            tcon->ses->serverName,
389                                            tcon->ses->serverNOS);
390                         tcon->broken_posix_open = true;
391                 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
392                          (rc != -EOPNOTSUPP)) /* path not found or net err */
393                         goto out;
394                 /* else fallthrough to retry open the old way on network i/o
395                    or DFS errors */
396         }
397
398         desiredAccess = cifs_convert_flags(file->f_flags);
399
400 /*********************************************************************
401  *  open flag mapping table:
402  *
403  *      POSIX Flag            CIFS Disposition
404  *      ----------            ----------------
405  *      O_CREAT               FILE_OPEN_IF
406  *      O_CREAT | O_EXCL      FILE_CREATE
407  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
408  *      O_TRUNC               FILE_OVERWRITE
409  *      none of the above     FILE_OPEN
410  *
411  *      Note that there is no direct match for the disposition
412  *      FILE_SUPERSEDE (i.e. create whether or not the file exists).
413  *      O_CREAT | O_TRUNC is similar, but it truncates an existing
414  *      file rather than creating a new one as FILE_SUPERSEDE does
415  *      (which uses the attributes / metadata passed in on the open call).
416  *?
417  *?  O_SYNC is a reasonable match to CIFS writethrough flag
418  *?  and the read write flags match reasonably.  O_LARGEFILE
419  *?  is irrelevant because largefile support is always used
420  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
421  *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
422  *********************************************************************/
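
/*
 * For example, with the mapping above an open(2) call using
 * O_WRONLY | O_CREAT | O_TRUNC is sent with desiredAccess = GENERIC_WRITE
 * (from cifs_convert_flags) and disposition = FILE_OVERWRITE_IF
 * (from cifs_get_disposition), while a plain O_RDONLY open maps to
 * GENERIC_READ with disposition FILE_OPEN.
 */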
423
424         disposition = cifs_get_disposition(file->f_flags);
425
426         /* BB pass O_SYNC flag through on file attributes .. BB */
427
428         /* Also refresh inode by passing in file_info buf returned by SMBOpen
429            and calling get_inode_info with returned buf (at least helps
430            non-Unix server case) */
431
432         /* BB we can not do this if this is the second open of a file
433            and the first handle has writebehind data, we might be
434            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
435         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
436         if (!buf) {
437                 rc = -ENOMEM;
438                 goto out;
439         }
440
441         if (tcon->ses->capabilities & CAP_NT_SMBS)
442                 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
443                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
444                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
445                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
446         else
447                 rc = -EIO; /* no NT SMB support; fall back to legacy open below */
448
449         if (rc == -EIO) {
450                 /* Old server, try legacy style OpenX */
451                 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
452                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
453                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
454                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
455         }
456         if (rc) {
457                 cFYI(1, "cifs_open returned 0x%x", rc);
458                 goto out;
459         }
460
461         rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
462         if (rc != 0)
463                 goto out;
464
465         pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
466         if (pCifsFile == NULL) {
467                 rc = -ENOMEM;
468                 goto out;
469         }
470
471         cifs_fscache_set_inode_cookie(inode, file);
472
473         if (oplock & CIFS_CREATE_ACTION) {
474                 /* time to set mode which we can not set earlier due to
475                    problems creating new read-only files */
476                 if (tcon->unix_ext) {
477                         struct cifs_unix_set_info_args args = {
478                                 .mode   = inode->i_mode,
479                                 .uid    = NO_CHANGE_64,
480                                 .gid    = NO_CHANGE_64,
481                                 .ctime  = NO_CHANGE_64,
482                                 .atime  = NO_CHANGE_64,
483                                 .mtime  = NO_CHANGE_64,
484                                 .device = 0,
485                         };
486                         CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
487                                                cifs_sb->local_nls,
488                                                cifs_sb->mnt_cifs_flags &
489                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
490                 }
491         }
492
493 out:
494         kfree(buf);
495         kfree(full_path);
496         FreeXid(xid);
497         cifs_put_tlink(tlink);
498         return rc;
499 }
500
501 /* Try to reacquire byte range locks that were released when the
502    session to the server was lost */
503 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
504 {
505         int rc = 0;
506
507 /* BB list all locks open on this file and relock */
508
509         return rc;
510 }
511
512 static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
513 {
514         int rc = -EACCES;
515         int xid;
516         __u32 oplock;
517         struct cifs_sb_info *cifs_sb;
518         struct cifsTconInfo *tcon;
519         struct cifsInodeInfo *pCifsInode;
520         struct inode *inode;
521         char *full_path = NULL;
522         int desiredAccess;
523         int disposition = FILE_OPEN;
524         __u16 netfid;
525
526         xid = GetXid();
527         mutex_lock(&pCifsFile->fh_mutex);
528         if (!pCifsFile->invalidHandle) {
529                 mutex_unlock(&pCifsFile->fh_mutex);
530                 rc = 0;
531                 FreeXid(xid);
532                 return rc;
533         }
534
535         inode = pCifsFile->dentry->d_inode;
536         cifs_sb = CIFS_SB(inode->i_sb);
537         tcon = tlink_tcon(pCifsFile->tlink);
538
539 /* can not grab rename sem here because various ops, including
540    those that already have the rename sem can end up causing writepage
541    to get called and if the server was down that means we end up here,
542    and we can never tell if the caller already has the rename_sem */
543         full_path = build_path_from_dentry(pCifsFile->dentry);
544         if (full_path == NULL) {
545                 rc = -ENOMEM;
546                 mutex_unlock(&pCifsFile->fh_mutex);
547                 FreeXid(xid);
548                 return rc;
549         }
550
551         cFYI(1, "inode = 0x%p file flags 0x%x for %s",
552                  inode, pCifsFile->f_flags, full_path);
553
554         if (oplockEnabled)
555                 oplock = REQ_OPLOCK;
556         else
557                 oplock = 0;
558
559         if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
560             (CIFS_UNIX_POSIX_PATH_OPS_CAP &
561                         le64_to_cpu(tcon->fsUnixInfo.Capability))) {
562
563                 /*
564                  * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
565                  * original open. Must mask them off for a reopen.
566                  */
567                 unsigned int oflags = pCifsFile->f_flags &
568                                                 ~(O_CREAT | O_EXCL | O_TRUNC);
569
570                 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
571                                 cifs_sb->mnt_file_mode /* ignored */,
572                                 oflags, &oplock, &netfid, xid);
573                 if (rc == 0) {
574                         cFYI(1, "posix reopen succeeded");
575                         goto reopen_success;
576                 }
577                 /* fallthrough to retry open the old way on errors;
578                    in the reconnect path especially, it is important to retry hard */
579         }
580
581         desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
582
583         /* Can not refresh inode by passing in file_info buf to be returned
584            by SMBOpen and then calling get_inode_info with returned buf
585            since file might have write behind data that needs to be flushed
586            and server version of file size can be stale. If we knew for sure
587            that inode was not dirty locally we could do this */
588
589         rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
590                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
591                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
592                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
593         if (rc) {
594                 mutex_unlock(&pCifsFile->fh_mutex);
595                 cFYI(1, "cifs_open returned 0x%x", rc);
596                 cFYI(1, "oplock: %d", oplock);
597                 goto reopen_error_exit;
598         }
599
600 reopen_success:
601         pCifsFile->netfid = netfid;
602         pCifsFile->invalidHandle = false;
603         mutex_unlock(&pCifsFile->fh_mutex);
604         pCifsInode = CIFS_I(inode);
605
606         if (can_flush) {
607                 rc = filemap_write_and_wait(inode->i_mapping);
608                 mapping_set_error(inode->i_mapping, rc);
609
610                 pCifsInode->clientCanCacheAll = false;
611                 pCifsInode->clientCanCacheRead = false;
612                 if (tcon->unix_ext)
613                         rc = cifs_get_inode_info_unix(&inode,
614                                 full_path, inode->i_sb, xid);
615                 else
616                         rc = cifs_get_inode_info(&inode,
617                                 full_path, NULL, inode->i_sb,
618                                 xid, NULL);
619         } /* else we are writing out data to server already
620              and could deadlock if we tried to flush data, and
621              since we do not know if we have data that would
622              invalidate the current end of file on the server
623              we can not go to the server to get the new inode
624              info */
625         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
626                 pCifsInode->clientCanCacheAll = true;
627                 pCifsInode->clientCanCacheRead = true;
628                 cFYI(1, "Exclusive Oplock granted on inode %p",
629                          pCifsFile->dentry->d_inode);
630         } else if ((oplock & 0xF) == OPLOCK_READ) {
631                 pCifsInode->clientCanCacheRead = true;
632                 pCifsInode->clientCanCacheAll = false;
633         } else {
634                 pCifsInode->clientCanCacheRead = false;
635                 pCifsInode->clientCanCacheAll = false;
636         }
637         cifs_relock_file(pCifsFile);
638
639 reopen_error_exit:
640         kfree(full_path);
641         FreeXid(xid);
642         return rc;
643 }
644
645 int cifs_close(struct inode *inode, struct file *file)
646 {
647         cifsFileInfo_put(file->private_data);
648         file->private_data = NULL;
649
650         /* return code from the ->release op is always ignored */
651         return 0;
652 }
653
654 int cifs_closedir(struct inode *inode, struct file *file)
655 {
656         int rc = 0;
657         int xid;
658         struct cifsFileInfo *pCFileStruct = file->private_data;
659         char *ptmp;
660
661         cFYI(1, "Closedir inode = 0x%p", inode);
662
663         xid = GetXid();
664
665         if (pCFileStruct) {
666                 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
667
668                 cFYI(1, "Freeing private data in close dir");
669                 spin_lock(&cifs_file_list_lock);
670                 if (!pCFileStruct->srch_inf.endOfSearch &&
671                     !pCFileStruct->invalidHandle) {
672                         pCFileStruct->invalidHandle = true;
673                         spin_unlock(&cifs_file_list_lock);
674                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
675                         cFYI(1, "Closing uncompleted readdir with rc %d",
676                                  rc);
677                         /* not much we can do if it fails anyway, ignore rc */
678                         rc = 0;
679                 } else
680                         spin_unlock(&cifs_file_list_lock);
681                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
682                 if (ptmp) {
683                         cFYI(1, "closedir free smb buf in srch struct");
684                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
685                         if (pCFileStruct->srch_inf.smallBuf)
686                                 cifs_small_buf_release(ptmp);
687                         else
688                                 cifs_buf_release(ptmp);
689                 }
690                 cifs_put_tlink(pCFileStruct->tlink);
691                 kfree(file->private_data);
692                 file->private_data = NULL;
693         }
694         /* BB can we lock the filestruct while this is going on? */
695         FreeXid(xid);
696         return rc;
697 }
698
699 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
700                                 __u64 offset, __u8 lockType)
701 {
702         struct cifsLockInfo *li =
703                 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
704         if (li == NULL)
705                 return -ENOMEM;
706         li->offset = offset;
707         li->length = len;
708         li->type = lockType;
709         mutex_lock(&fid->lock_mutex);
710         list_add(&li->llist, &fid->llist);
711         mutex_unlock(&fid->lock_mutex);
712         return 0;
713 }
714
715 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
716 {
717         int rc, xid;
718         __u32 numLock = 0;
719         __u32 numUnlock = 0;
720         __u64 length;
721         bool wait_flag = false;
722         struct cifs_sb_info *cifs_sb;
723         struct cifsTconInfo *tcon;
724         __u16 netfid;
725         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
726         bool posix_locking = false;
727
728         length = 1 + pfLock->fl_end - pfLock->fl_start;
729         rc = -EACCES;
730         xid = GetXid();
731
732         cFYI(1, "Lock parm: 0x%x flockflags: "
733                  "0x%x flocktype: 0x%x start: %lld end: %lld",
734                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
735                 pfLock->fl_end);
736
737         if (pfLock->fl_flags & FL_POSIX)
738                 cFYI(1, "Posix");
739         if (pfLock->fl_flags & FL_FLOCK)
740                 cFYI(1, "Flock");
741         if (pfLock->fl_flags & FL_SLEEP) {
742                 cFYI(1, "Blocking lock");
743                 wait_flag = true;
744         }
745         if (pfLock->fl_flags & FL_ACCESS)
746                 cFYI(1, "Process suspended by mandatory locking - "
747                          "not implemented yet");
748         if (pfLock->fl_flags & FL_LEASE)
749                 cFYI(1, "Lease on file - not implemented yet");
750         if (pfLock->fl_flags &
751             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
752                 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
753
754         if (pfLock->fl_type == F_WRLCK) {
755                 cFYI(1, "F_WRLCK ");
756                 numLock = 1;
757         } else if (pfLock->fl_type == F_UNLCK) {
758                 cFYI(1, "F_UNLCK");
759                 numUnlock = 1;
760                 /* Check if unlock includes more than
761                 one lock range */
762         } else if (pfLock->fl_type == F_RDLCK) {
763                 cFYI(1, "F_RDLCK");
764                 lockType |= LOCKING_ANDX_SHARED_LOCK;
765                 numLock = 1;
766         } else if (pfLock->fl_type == F_EXLCK) {
767                 cFYI(1, "F_EXLCK");
768                 numLock = 1;
769         } else if (pfLock->fl_type == F_SHLCK) {
770                 cFYI(1, "F_SHLCK");
771                 lockType |= LOCKING_ANDX_SHARED_LOCK;
772                 numLock = 1;
773         } else
774                 cFYI(1, "Unknown type of lock");
775
776         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
777         tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
778
779         if (file->private_data == NULL) {
780                 rc = -EBADF;
781                 FreeXid(xid);
782                 return rc;
783         }
784         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
785
786         if ((tcon->ses->capabilities & CAP_UNIX) &&
787             (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
788             ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
789                 posix_locking = 1;
790         /* BB add code here to normalize offset and length to
791         account for negative length which we can not accept over the
792         wire */
793         if (IS_GETLK(cmd)) {
794                 if (posix_locking) {
795                         int posix_lock_type;
796                         if (lockType & LOCKING_ANDX_SHARED_LOCK)
797                                 posix_lock_type = CIFS_RDLCK;
798                         else
799                                 posix_lock_type = CIFS_WRLCK;
800                         rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
801                                         length, pfLock,
802                                         posix_lock_type, wait_flag);
803                         FreeXid(xid);
804                         return rc;
805                 }
806
807                 /* BB we could chain these into one lock request BB */
808                 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
809                                  0, 1, lockType, 0 /* wait flag */ );
810                 if (rc == 0) {
811                         rc = CIFSSMBLock(xid, tcon, netfid, length,
812                                          pfLock->fl_start, 1 /* numUnlock */ ,
813                                          0 /* numLock */ , lockType,
814                                          0 /* wait flag */ );
815                         pfLock->fl_type = F_UNLCK;
816                         if (rc != 0)
817                                 cERROR(1, "Error unlocking previously locked "
818                                            "range %d during test of lock", rc);
819                         rc = 0;
820
821                 } else {
822                         /* if rc == ERR_SHARING_VIOLATION ? */
823                         rc = 0;
824
825                         if (lockType & LOCKING_ANDX_SHARED_LOCK) {
826                                 pfLock->fl_type = F_WRLCK;
827                         } else {
828                                 rc = CIFSSMBLock(xid, tcon, netfid, length,
829                                         pfLock->fl_start, 0, 1,
830                                         lockType | LOCKING_ANDX_SHARED_LOCK,
831                                         0 /* wait flag */);
832                                 if (rc == 0) {
833                                         rc = CIFSSMBLock(xid, tcon, netfid,
834                                                 length, pfLock->fl_start, 1, 0,
835                                                 lockType |
836                                                 LOCKING_ANDX_SHARED_LOCK,
837                                                 0 /* wait flag */);
838                                         pfLock->fl_type = F_RDLCK;
839                                         if (rc != 0)
840                                                 cERROR(1, "Error unlocking "
841                                                 "previously locked range %d "
842                                                 "during test of lock", rc);
843                                         rc = 0;
844                                 } else {
845                                         pfLock->fl_type = F_WRLCK;
846                                         rc = 0;
847                                 }
848                         }
849                 }
850
851                 FreeXid(xid);
852                 return rc;
853         }
854
855         if (!numLock && !numUnlock) {
856                 /* if no lock or unlock then nothing
857                 to do since we do not know what it is */
858                 FreeXid(xid);
859                 return -EOPNOTSUPP;
860         }
861
862         if (posix_locking) {
863                 int posix_lock_type;
864                 if (lockType & LOCKING_ANDX_SHARED_LOCK)
865                         posix_lock_type = CIFS_RDLCK;
866                 else
867                         posix_lock_type = CIFS_WRLCK;
868
869                 if (numUnlock == 1)
870                         posix_lock_type = CIFS_UNLCK;
871
872                 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
873                                       length, pfLock,
874                                       posix_lock_type, wait_flag);
875         } else {
876                 struct cifsFileInfo *fid = file->private_data;
877
878                 if (numLock) {
879                         rc = CIFSSMBLock(xid, tcon, netfid, length,
880                                         pfLock->fl_start,
881                                         0, numLock, lockType, wait_flag);
882
883                         if (rc == 0) {
884                                 /* For Windows locks we must store them. */
885                                 rc = store_file_lock(fid, length,
886                                                 pfLock->fl_start, lockType);
887                         }
888                 } else if (numUnlock) {
889                         /* For each stored lock that this unlock overlaps
890                            completely, unlock it. */
891                         int stored_rc = 0;
892                         struct cifsLockInfo *li, *tmp;
893
894                         rc = 0;
895                         mutex_lock(&fid->lock_mutex);
896                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
897                                 if (pfLock->fl_start <= li->offset &&
898                                                 (pfLock->fl_start + length) >=
899                                                 (li->offset + li->length)) {
900                                         stored_rc = CIFSSMBLock(xid, tcon,
901                                                         netfid,
902                                                         li->length, li->offset,
903                                                         1, 0, li->type, false);
904                                         if (stored_rc)
905                                                 rc = stored_rc;
906                                         else {
907                                                 list_del(&li->llist);
908                                                 kfree(li);
909                                         }
910                                 }
911                         }
912                         mutex_unlock(&fid->lock_mutex);
913                 }
914         }
915
916         if (pfLock->fl_flags & FL_POSIX)
917                 posix_lock_file_wait(file, pfLock);
918         FreeXid(xid);
919         return rc;
920 }
921
922 /*
923  * Set the timeout on write requests past EOF. For some servers (Windows)
924  * these calls can be very long.
925  *
926  * If we're writing >10M past the EOF we give a 180s timeout. Anything less
927  * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
928  * The 10M cutoff is totally arbitrary. A better scheme for this would be
929  * welcome if someone wants to suggest one.
930  *
931  * We may be able to do a better job with this if there were some way to
932  * declare that a file should be sparse.
933  */
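/*
 * For example, with server_eof at 1 MB: a write starting at offset 512 KB
 * gets CIFS_STD_OP (15s), one starting at 6 MB gets CIFS_LONG_OP (45s),
 * and one starting at 20 MB gets CIFS_VLONG_OP (180s).
 */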
934 static int
935 cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
936 {
937         if (offset <= cifsi->server_eof)
938                 return CIFS_STD_OP;
939         else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
940                 return CIFS_VLONG_OP;
941         else
942                 return CIFS_LONG_OP;
943 }
944
945 /* update the file size (if needed) after a write */
946 static void
947 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
948                       unsigned int bytes_written)
949 {
950         loff_t end_of_write = offset + bytes_written;
951
952         if (end_of_write > cifsi->server_eof)
953                 cifsi->server_eof = end_of_write;
954 }
955
956 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
957         size_t write_size, loff_t *poffset)
958 {
959         int rc = 0;
960         unsigned int bytes_written = 0;
961         unsigned int total_written;
962         struct cifs_sb_info *cifs_sb;
963         struct cifsTconInfo *pTcon;
964         int xid, long_op;
965         struct cifsFileInfo *open_file;
966         struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
967
968         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
969
970         /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
971            *poffset, file->f_path.dentry->d_name.name); */
972
973         if (file->private_data == NULL)
974                 return -EBADF;
975
976         open_file = file->private_data;
977         pTcon = tlink_tcon(open_file->tlink);
978
979         rc = generic_write_checks(file, poffset, &write_size, 0);
980         if (rc)
981                 return rc;
982
983         xid = GetXid();
984
985         long_op = cifs_write_timeout(cifsi, *poffset);
986         for (total_written = 0; write_size > total_written;
987              total_written += bytes_written) {
988                 rc = -EAGAIN;
989                 while (rc == -EAGAIN) {
990                         if (file->private_data == NULL) {
991                                 /* file has been closed on us */
992                                 FreeXid(xid);
993                         /* if we have gotten here we have written some data
994                            and blocked, and the file has been freed on us while
995                            we blocked, so return what we managed to write */
996                                 return total_written;
997                         }
998                         if (open_file->invalidHandle) {
999                                 /* we could deadlock if we called
1000                                    filemap_fdatawait from here so tell
1001                                    reopen_file not to flush data to server
1002                                    now */
1003                                 rc = cifs_reopen_file(open_file, false);
1004                                 if (rc != 0)
1005                                         break;
1006                         }
1007
1008                         rc = CIFSSMBWrite(xid, pTcon,
1009                                 open_file->netfid,
1010                                 min_t(const int, cifs_sb->wsize,
1011                                       write_size - total_written),
1012                                 *poffset, &bytes_written,
1013                                 NULL, write_data + total_written, long_op);
1014                 }
1015                 if (rc || (bytes_written == 0)) {
1016                         if (total_written)
1017                                 break;
1018                         else {
1019                                 FreeXid(xid);
1020                                 return rc;
1021                         }
1022                 } else {
1023                         cifs_update_eof(cifsi, *poffset, bytes_written);
1024                         *poffset += bytes_written;
1025                 }
1026                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1027                                     15 seconds is plenty */
1028         }
1029
1030         cifs_stats_bytes_written(pTcon, total_written);
1031
1032         /* since the write may have blocked, check these pointers again */
1033         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1034                 struct inode *inode = file->f_path.dentry->d_inode;
1035 /* Do not update local mtime - server will set its actual value on write
1036  *              inode->i_ctime = inode->i_mtime =
1037  *                      current_fs_time(inode->i_sb);*/
1038                 if (total_written > 0) {
1039                         spin_lock(&inode->i_lock);
1040                         if (*poffset > file->f_path.dentry->d_inode->i_size)
1041                                 i_size_write(file->f_path.dentry->d_inode,
1042                                         *poffset);
1043                         spin_unlock(&inode->i_lock);
1044                 }
1045                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1046         }
1047         FreeXid(xid);
1048         return total_written;
1049 }
1050
1051 static ssize_t cifs_write(struct cifsFileInfo *open_file,
1052                           const char *write_data, size_t write_size,
1053                           loff_t *poffset)
1054 {
1055         int rc = 0;
1056         unsigned int bytes_written = 0;
1057         unsigned int total_written;
1058         struct cifs_sb_info *cifs_sb;
1059         struct cifsTconInfo *pTcon;
1060         int xid, long_op;
1061         struct dentry *dentry = open_file->dentry;
1062         struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1063
1064         cifs_sb = CIFS_SB(dentry->d_sb);
1065
1066         cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1067            *poffset, dentry->d_name.name);
1068
1069         pTcon = tlink_tcon(open_file->tlink);
1070
1071         xid = GetXid();
1072
1073         long_op = cifs_write_timeout(cifsi, *poffset);
1074         for (total_written = 0; write_size > total_written;
1075              total_written += bytes_written) {
1076                 rc = -EAGAIN;
1077                 while (rc == -EAGAIN) {
1078                         if (open_file->invalidHandle) {
1079                                 /* we could deadlock if we called
1080                                    filemap_fdatawait from here so tell
1081                                    reopen_file not to flush data to
1082                                    server now */
1083                                 rc = cifs_reopen_file(open_file, false);
1084                                 if (rc != 0)
1085                                         break;
1086                         }
1087                         if (experimEnabled || (pTcon->ses->server &&
1088                                 ((pTcon->ses->server->secMode &
1089                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1090                                 == 0))) {
1091                                 struct kvec iov[2];
1092                                 unsigned int len;
1093
1094                                 len = min((size_t)cifs_sb->wsize,
1095                                           write_size - total_written);
1096                                 /* iov[0] is reserved for smb header */
1097                                 iov[1].iov_base = (char *)write_data +
1098                                                   total_written;
1099                                 iov[1].iov_len = len;
1100                                 rc = CIFSSMBWrite2(xid, pTcon,
1101                                                 open_file->netfid, len,
1102                                                 *poffset, &bytes_written,
1103                                                 iov, 1, long_op);
1104                         } else
1105                                 rc = CIFSSMBWrite(xid, pTcon,
1106                                          open_file->netfid,
1107                                          min_t(const int, cifs_sb->wsize,
1108                                                write_size - total_written),
1109                                          *poffset, &bytes_written,
1110                                          write_data + total_written,
1111                                          NULL, long_op);
1112                 }
1113                 if (rc || (bytes_written == 0)) {
1114                         if (total_written)
1115                                 break;
1116                         else {
1117                                 FreeXid(xid);
1118                                 return rc;
1119                         }
1120                 } else {
1121                         cifs_update_eof(cifsi, *poffset, bytes_written);
1122                         *poffset += bytes_written;
1123                 }
1124                 long_op = CIFS_STD_OP; /* subsequent writes fast -
1125                                     15 seconds is plenty */
1126         }
1127
1128         cifs_stats_bytes_written(pTcon, total_written);
1129
1130         if (total_written > 0) {
1131                 spin_lock(&dentry->d_inode->i_lock);
1132                 if (*poffset > dentry->d_inode->i_size)
1133                         i_size_write(dentry->d_inode, *poffset);
1134                 spin_unlock(&dentry->d_inode->i_lock);
1135         }
1136         mark_inode_dirty_sync(dentry->d_inode);
1137         FreeXid(xid);
1138         return total_written;
1139 }
1140
1141 #ifdef CONFIG_CIFS_EXPERIMENTAL
1142 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1143                                         bool fsuid_only)
1144 {
1145         struct cifsFileInfo *open_file = NULL;
1146         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1147
1148         /* only filter by fsuid on multiuser mounts */
1149         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1150                 fsuid_only = false;
1151
1152         spin_lock(&cifs_file_list_lock);
1153         /* we could simply get the first_list_entry since write-only entries
1154            are always at the end of the list but since the first entry might
1155            have a close pending, we go through the whole list */
1156         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1157                 if (fsuid_only && open_file->uid != current_fsuid())
1158                         continue;
1159                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1160                         if (!open_file->invalidHandle) {
1161                                 /* found a good file */
1162                                 /* lock it so it will not be closed on us */
1163                                 cifsFileInfo_get(open_file);
1164                                 spin_unlock(&cifs_file_list_lock);
1165                                 return open_file;
1166                         } /* else might as well continue, and look for
1167                              another, or simply have the caller reopen it
1168                              again rather than trying to fix this handle */
1169                 } else /* write only file */
1170                         break; /* write only files are last so must be done */
1171         }
1172         spin_unlock(&cifs_file_list_lock);
1173         return NULL;
1174 }
1175 #endif
1176
1177 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1178                                         bool fsuid_only)
1179 {
1180         struct cifsFileInfo *open_file;
1181         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1182         bool any_available = false;
1183         int rc;
1184
1185         /* Having a null inode here (because mapping->host was set to zero by
1186         the VFS or MM) should not happen, but we had reports of an oops (due to
1187         it being zero) during stress testcases, so we need to check for it */
1188
1189         if (cifs_inode == NULL) {
1190                 cERROR(1, "Null inode passed to cifs_writeable_file");
1191                 dump_stack();
1192                 return NULL;
1193         }
1194
1195         /* only filter by fsuid on multiuser mounts */
1196         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1197                 fsuid_only = false;
1198
1199         spin_lock(&cifs_file_list_lock);
1200 refind_writable:
1201         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1202                 if (!any_available && open_file->pid != current->tgid)
1203                         continue;
1204                 if (fsuid_only && open_file->uid != current_fsuid())
1205                         continue;
1206                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1207                         cifsFileInfo_get(open_file);
1208
1209                         if (!open_file->invalidHandle) {
1210                                 /* found a good writable file */
1211                                 spin_unlock(&cifs_file_list_lock);
1212                                 return open_file;
1213                         }
1214
1215                         spin_unlock(&cifs_file_list_lock);
1216
1217                         /* Had to unlock since following call can block */
1218                         rc = cifs_reopen_file(open_file, false);
1219                         if (!rc)
1220                                 return open_file;
1221
1222                         /* if it fails, try another handle if possible */
1223                         cFYI(1, "wp failed on reopen file");
1224                         cifsFileInfo_put(open_file);
1225
1226                         spin_lock(&cifs_file_list_lock);
1227
1228                         /* else we simply continue to the next entry. Thus
1229                            we do not loop on reopen errors.  If we
1230                            can not reopen the file, for example if we
1231                            reconnected to a server with another client
1232                            racing to delete or lock the file we would not
1233                            make progress if we restarted before the beginning
1234                            of the loop here. */
1235                 }
1236         }
1237         /* couldn't find a usable FH with the same pid, try any available */
1238         if (!any_available) {
1239                 any_available = true;
1240                 goto refind_writable;
1241         }
1242         spin_unlock(&cifs_file_list_lock);
1243         return NULL;
1244 }
1245
1246 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1247 {
1248         struct address_space *mapping = page->mapping;
1249         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1250         char *write_data;
1251         int rc = -EFAULT;
1252         int bytes_written = 0;
1253         struct cifs_sb_info *cifs_sb;
1254         struct inode *inode;
1255         struct cifsFileInfo *open_file;
1256
1257         if (!mapping || !mapping->host)
1258                 return -EFAULT;
1259
1260         inode = page->mapping->host;
1261         cifs_sb = CIFS_SB(inode->i_sb);
1262
1263         offset += (loff_t)from;
1264         write_data = kmap(page);
1265         write_data += from;
1266
1267         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1268                 kunmap(page);
1269                 return -EIO;
1270         }
1271
1272         /* racing with truncate? */
1273         if (offset > mapping->host->i_size) {
1274                 kunmap(page);
1275                 return 0; /* don't care */
1276         }
1277
1278         /* check to make sure that we are not extending the file */
1279         if (mapping->host->i_size - offset < (loff_t)to)
1280                 to = (unsigned)(mapping->host->i_size - offset);
1281
1282         open_file = find_writable_file(CIFS_I(mapping->host), false);
1283         if (open_file) {
1284                 bytes_written = cifs_write(open_file, write_data,
1285                                            to - from, &offset);
1286                 cifsFileInfo_put(open_file);
1287                 /* Does mm or vfs already set times? */
1288                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1289                 if ((bytes_written > 0) && (offset))
1290                         rc = 0;
1291                 else if (bytes_written < 0)
1292                         rc = bytes_written;
1293         } else {
1294                 cFYI(1, "No writeable filehandles for inode");
1295                 rc = -EIO;
1296         }
1297
1298         kunmap(page);
1299         return rc;
1300 }
1301
1302 static int cifs_writepages(struct address_space *mapping,
1303                            struct writeback_control *wbc)
1304 {
1305         struct backing_dev_info *bdi = mapping->backing_dev_info;
1306         unsigned int bytes_to_write;
1307         unsigned int bytes_written;
1308         struct cifs_sb_info *cifs_sb;
1309         int done = 0;
1310         pgoff_t end;
1311         pgoff_t index;
1312         int range_whole = 0;
1313         struct kvec *iov;
1314         int len;
1315         int n_iov = 0;
1316         pgoff_t next;
1317         int nr_pages;
1318         __u64 offset = 0;
1319         struct cifsFileInfo *open_file;
1320         struct cifsTconInfo *tcon;
1321         struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
1322         struct page *page;
1323         struct pagevec pvec;
1324         int rc = 0;
1325         int scanned = 0;
1326         int xid, long_op;
1327
1328         /*
1329          * BB: Is this meaningful for a non-block-device file system?
1330          * If it is, we should test it again after we do I/O
1331          */
1332         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1333                 wbc->encountered_congestion = 1;
1334                 return 0;
1335         }
1336
1337         cifs_sb = CIFS_SB(mapping->host->i_sb);
1338
1339         /*
1340          * If wsize is smaller than the page cache size, default to writing
1341          * one page at a time via cifs_writepage
1342          */
1343         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1344                 return generic_writepages(mapping, wbc);
1345
1346         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1347         if (iov == NULL)
1348                 return generic_writepages(mapping, wbc);
1349
1350         /*
1351          * if there's no open file, then this is likely to fail too,
1352          * but it'll at least handle the return. Maybe it should be
1353          * a BUG() instead?
1354          */
1355         open_file = find_writable_file(CIFS_I(mapping->host), false);
1356         if (!open_file) {
1357                 kfree(iov);
1358                 return generic_writepages(mapping, wbc);
1359         }
1360
1361         tcon = tlink_tcon(open_file->tlink);
1362         if (!experimEnabled && tcon->ses->server->secMode &
1363                         (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1364                 cifsFileInfo_put(open_file);
1365                 kfree(iov);
1366                 return generic_writepages(mapping, wbc);
1367         }
1368         cifsFileInfo_put(open_file);
1369
1370         xid = GetXid();
1371
1372         pagevec_init(&pvec, 0);
1373         if (wbc->range_cyclic) {
1374                 index = mapping->writeback_index; /* Start from prev offset */
1375                 end = -1;
1376         } else {
1377                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1378                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1379                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1380                         range_whole = 1;
1381                 scanned = 1;
1382         }
1383 retry:
1384         while (!done && (index <= end) &&
1385                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1386                         PAGECACHE_TAG_DIRTY,
1387                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1388                 int first;
1389                 unsigned int i;
1390
1391                 first = -1;
1392                 next = 0;
1393                 n_iov = 0;
1394                 bytes_to_write = 0;
1395
1396                 for (i = 0; i < nr_pages; i++) {
1397                         page = pvec.pages[i];
1398                         /*
1399                          * At this point we hold neither mapping->tree_lock nor
1400                          * lock on the page itself: the page may be truncated or
1401                          * invalidated (changing page->mapping to NULL), or even
1402                          * swizzled back from swapper_space to tmpfs file
1403                          * mapping
1404                          */
1405
1406                         if (first < 0)
1407                                 lock_page(page);
1408                         else if (!trylock_page(page))
1409                                 break;
1410
1411                         if (unlikely(page->mapping != mapping)) {
1412                                 unlock_page(page);
1413                                 break;
1414                         }
1415
1416                         if (!wbc->range_cyclic && page->index > end) {
1417                                 done = 1;
1418                                 unlock_page(page);
1419                                 break;
1420                         }
1421
1422                         if (next && (page->index != next)) {
1423                                 /* Not next consecutive page */
1424                                 unlock_page(page);
1425                                 break;
1426                         }
1427
1428                         if (wbc->sync_mode != WB_SYNC_NONE)
1429                                 wait_on_page_writeback(page);
1430
1431                         if (PageWriteback(page) ||
1432                                         !clear_page_dirty_for_io(page)) {
1433                                 unlock_page(page);
1434                                 break;
1435                         }
1436
1437                         /*
1438                          * This actually clears the dirty bit in the radix tree.
1439                          * See cifs_writepage() for more commentary.
1440                          */
1441                         set_page_writeback(page);
1442
1443                         if (page_offset(page) >= mapping->host->i_size) {
1444                                 done = 1;
1445                                 unlock_page(page);
1446                                 end_page_writeback(page);
1447                                 break;
1448                         }
1449
1450                         /*
1451                          * BB can we get rid of this?  pages are held by pvec
1452                          */
1453                         page_cache_get(page);
1454
1455                         len = min(mapping->host->i_size - page_offset(page),
1456                                   (loff_t)PAGE_CACHE_SIZE);
1457
1458                         /* reserve iov[0] for the smb header */
1459                         n_iov++;
1460                         iov[n_iov].iov_base = kmap(page);
1461                         iov[n_iov].iov_len = len;
1462                         bytes_to_write += len;
1463
1464                         if (first < 0) {
1465                                 first = i;
1466                                 offset = page_offset(page);
1467                         }
1468                         next = page->index + 1;
1469                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1470                                 break;
1471                 }
1472                 if (n_iov) {
1473                         open_file = find_writable_file(CIFS_I(mapping->host),
1474                                                         false);
1475                         if (!open_file) {
1476                                 cERROR(1, "No writable handles for inode");
1477                                 rc = -EBADF;
1478                         } else {
1479                                 long_op = cifs_write_timeout(cifsi, offset);
1480                                 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
1481                                                    bytes_to_write, offset,
1482                                                    &bytes_written, iov, n_iov,
1483                                                    long_op);
1484                                 cifsFileInfo_put(open_file);
1485                                 cifs_update_eof(cifsi, offset, bytes_written);
1486                         }
1487
1488                         if (rc || bytes_written < bytes_to_write) {
1489                                 cERROR(1, "Write2 ret %d, wrote %d",
1490                                           rc, bytes_written);
1491                                 mapping_set_error(mapping, rc);
1492                         } else {
1493                                 cifs_stats_bytes_written(tcon, bytes_written);
1494                         }
1495
1496                         for (i = 0; i < n_iov; i++) {
1497                                 page = pvec.pages[first + i];
1498                                 /* Should we also set the page error on a
1499                                 successful rc but too little data written? */
1500                                 /* BB investigate retry logic for temporary
1501                                 server crash cases and how recovery works
1502                                 when a page is marked as error */
1503                                 if (rc)
1504                                         SetPageError(page);
1505                                 kunmap(page);
1506                                 unlock_page(page);
1507                                 end_page_writeback(page);
1508                                 page_cache_release(page);
1509                         }
1510                         if ((wbc->nr_to_write -= n_iov) <= 0)
1511                                 done = 1;
1512                         index = next;
1513                 } else
1514                         /* Need to re-find the pages we skipped */
1515                         index = pvec.pages[0]->index + 1;
1516
1517                 pagevec_release(&pvec);
1518         }
1519         if (!scanned && !done) {
1520                 /*
1521                  * We hit the last page and there is more work to be done: wrap
1522                  * back to the start of the file
1523                  */
1524                 scanned = 1;
1525                 index = 0;
1526                 goto retry;
1527         }
1528         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1529                 mapping->writeback_index = index;
1530
1531         FreeXid(xid);
1532         kfree(iov);
1533         return rc;
1534 }
1535
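/*
 * Write a single dirty page synchronously via cifs_partialpagewrite().  The
 * page is tagged for writeback before the write and the writeback state is
 * cleared afterwards, as required of any ->writepage implementation.
 */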
1536 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1537 {
1538         int rc = -EFAULT;
1539         int xid;
1540
1541         xid = GetXid();
1542 /* BB add check for wbc flags */
1543         page_cache_get(page);
1544         if (!PageUptodate(page))
1545                 cFYI(1, "ppw - page not up to date");
1546
1547         /*
1548          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1549          *
1550          * A writepage() implementation always needs to do either this,
1551          * or re-dirty the page with "redirty_page_for_writepage()" in
1552          * the case of a failure.
1553          *
1554          * Just unlocking the page will cause the radix tree tag-bits
1555          * to fail to update with the state of the page correctly.
1556          */
1557         set_page_writeback(page);
1558         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1559         SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1560         unlock_page(page);
1561         end_page_writeback(page);
1562         page_cache_release(page);
1563         FreeXid(xid);
1564         return rc;
1565 }
1566
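/*
 * Complete a buffered write started by cifs_write_begin().  If the page is
 * up to date the copied data is simply marked dirty; otherwise the modified
 * range is written to the server immediately with cifs_write().  The inode
 * size is extended if the write went past the current EOF.
 */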
1567 static int cifs_write_end(struct file *file, struct address_space *mapping,
1568                         loff_t pos, unsigned len, unsigned copied,
1569                         struct page *page, void *fsdata)
1570 {
1571         int rc;
1572         struct inode *inode = mapping->host;
1573
1574         cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1575                  page, pos, copied);
1576
1577         if (PageChecked(page)) {
1578                 if (copied == len)
1579                         SetPageUptodate(page);
1580                 ClearPageChecked(page);
1581         } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1582                 SetPageUptodate(page);
1583
1584         if (!PageUptodate(page)) {
1585                 char *page_data;
1586                 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1587                 int xid;
1588
1589                 xid = GetXid();
1590                 /* this is probably better than directly calling
1591                    partialpage_write, since here the file handle is
1592                    already known and we might as well leverage it */
1593                 /* BB check whether anything else from ppw is missing,
1594                    such as updating the last write time */
1595                 page_data = kmap(page);
1596                 rc = cifs_write(file->private_data, page_data + offset,
1597                                 copied, &pos);
1598                 /* if (rc < 0) should we set writebehind rc? */
1599                 kunmap(page);
1600
1601                 FreeXid(xid);
1602         } else {
1603                 rc = copied;
1604                 pos += copied;
1605                 set_page_dirty(page);
1606         }
1607
1608         if (rc > 0) {
1609                 spin_lock(&inode->i_lock);
1610                 if (pos > inode->i_size)
1611                         i_size_write(inode, pos);
1612                 spin_unlock(&inode->i_lock);
1613         }
1614
1615         unlock_page(page);
1616         page_cache_release(page);
1617
1618         return rc;
1619 }
1620
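/*
 * Flush dirty pages for the inode to the server and, unless the mount sets
 * CIFS_MOUNT_NOSSYNC, also send an SMB flush for the file handle so the
 * server commits the data.
 */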
1621 int cifs_fsync(struct file *file, int datasync)
1622 {
1623         int xid;
1624         int rc = 0;
1625         struct cifsTconInfo *tcon;
1626         struct cifsFileInfo *smbfile = file->private_data;
1627         struct inode *inode = file->f_path.dentry->d_inode;
1628
1629         xid = GetXid();
1630
1631         cFYI(1, "Sync file - name: %s datasync: 0x%x",
1632                 file->f_path.dentry->d_name.name, datasync);
1633
1634         rc = filemap_write_and_wait(inode->i_mapping);
1635         if (rc == 0) {
1636                 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1637
1638                 tcon = tlink_tcon(smbfile->tlink);
1639                 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1640                         rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1641         }
1642
1643         FreeXid(xid);
1644         return rc;
1645 }
1646
1647 /* static void cifs_sync_page(struct page *page)
1648 {
1649         struct address_space *mapping;
1650         struct inode *inode;
1651         unsigned long index = page->index;
1652         unsigned int rpages = 0;
1653         int rc = 0;
1654
1655         cFYI(1, "sync page %p", page);
1656         mapping = page->mapping;
1657         if (!mapping)
1658                 return 0;
1659         inode = mapping->host;
1660         if (!inode)
1661                 return; */
1662
1663 /*      fill in rpages then
1664         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1665
1666 /*      cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1667
1668 #if 0
1669         if (rc < 0)
1670                 return rc;
1671         return 0;
1672 #endif
1673 } */
1674
1675 /*
1676  * As the file closes, flush all cached write data for this inode,
1677  * checking for writebehind errors.
1678  */
1679 int cifs_flush(struct file *file, fl_owner_t id)
1680 {
1681         struct inode *inode = file->f_path.dentry->d_inode;
1682         int rc = 0;
1683
1684         if (file->f_mode & FMODE_WRITE)
1685                 rc = filemap_write_and_wait(inode->i_mapping);
1686
1687         cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1688
1689         return rc;
1690 }
1691
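/*
 * Read into a userspace buffer, bypassing the page cache.  The request is
 * broken into rsize-sized chunks; each chunk is fetched with CIFSSMBRead()
 * and copied out with copy_to_user(), reopening an invalidated handle and
 * retrying on -EAGAIN.  Returns the number of bytes read, or an error if
 * nothing was read.
 */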
1692 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1693         size_t read_size, loff_t *poffset)
1694 {
1695         int rc = -EACCES;
1696         unsigned int bytes_read = 0;
1697         unsigned int total_read = 0;
1698         unsigned int current_read_size;
1699         struct cifs_sb_info *cifs_sb;
1700         struct cifsTconInfo *pTcon;
1701         int xid;
1702         struct cifsFileInfo *open_file;
1703         char *smb_read_data;
1704         char __user *current_offset;
1705         struct smb_com_read_rsp *pSMBr;
1706
1707         xid = GetXid();
1708         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1709
1710         if (file->private_data == NULL) {
1711                 rc = -EBADF;
1712                 FreeXid(xid);
1713                 return rc;
1714         }
1715         open_file = file->private_data;
1716         pTcon = tlink_tcon(open_file->tlink);
1717
1718         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1719                 cFYI(1, "attempting read on write only file instance");
1720
1721         for (total_read = 0, current_offset = read_data;
1722              read_size > total_read;
1723              total_read += bytes_read, current_offset += bytes_read) {
1724                 current_read_size = min_t(const int, read_size - total_read,
1725                                           cifs_sb->rsize);
1726                 rc = -EAGAIN;
1727                 smb_read_data = NULL;
1728                 while (rc == -EAGAIN) {
1729                         int buf_type = CIFS_NO_BUFFER;
1730                         if (open_file->invalidHandle) {
1731                                 rc = cifs_reopen_file(open_file, true);
1732                                 if (rc != 0)
1733                                         break;
1734                         }
1735                         rc = CIFSSMBRead(xid, pTcon,
1736                                          open_file->netfid,
1737                                          current_read_size, *poffset,
1738                                          &bytes_read, &smb_read_data,
1739                                          &buf_type);
1740                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1741                         if (smb_read_data) {
1742                                 if (copy_to_user(current_offset,
1743                                                 smb_read_data +
1744                                                 4 /* RFC1001 length field */ +
1745                                                 le16_to_cpu(pSMBr->DataOffset),
1746                                                 bytes_read))
1747                                         rc = -EFAULT;
1748
1749                                 if (buf_type == CIFS_SMALL_BUFFER)
1750                                         cifs_small_buf_release(smb_read_data);
1751                                 else if (buf_type == CIFS_LARGE_BUFFER)
1752                                         cifs_buf_release(smb_read_data);
1753                                 smb_read_data = NULL;
1754                         }
1755                 }
1756                 if (rc || (bytes_read == 0)) {
1757                         if (total_read) {
1758                                 break;
1759                         } else {
1760                                 FreeXid(xid);
1761                                 return rc;
1762                         }
1763                 } else {
1764                         cifs_stats_bytes_read(pTcon, bytes_read);
1765                         *poffset += bytes_read;
1766                 }
1767         }
1768         FreeXid(xid);
1769         return total_read;
1770 }
1771
1772
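/*
 * Read into a kernel buffer in rsize-sized chunks using CIFSSMBRead(),
 * reopening an invalidated handle and retrying on -EAGAIN.  For servers
 * without CAP_LARGE_FILES the chunk size is further capped by the
 * negotiated buffer size.  Returns the number of bytes read, or an error
 * if nothing was read.
 */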
1773 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1774         loff_t *poffset)
1775 {
1776         int rc = -EACCES;
1777         unsigned int bytes_read = 0;
1778         unsigned int total_read;
1779         unsigned int current_read_size;
1780         struct cifs_sb_info *cifs_sb;
1781         struct cifsTconInfo *pTcon;
1782         int xid;
1783         char *current_offset;
1784         struct cifsFileInfo *open_file;
1785         int buf_type = CIFS_NO_BUFFER;
1786
1787         xid = GetXid();
1788         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1789
1790         if (file->private_data == NULL) {
1791                 rc = -EBADF;
1792                 FreeXid(xid);
1793                 return rc;
1794         }
1795         open_file = file->private_data;
1796         pTcon = tlink_tcon(open_file->tlink);
1797
1798         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1799                 cFYI(1, "attempting read on write only file instance");
1800
1801         for (total_read = 0, current_offset = read_data;
1802              read_size > total_read;
1803              total_read += bytes_read, current_offset += bytes_read) {
1804                 current_read_size = min_t(const int, read_size - total_read,
1805                                           cifs_sb->rsize);
1806                 /* For Windows ME and 9x we do not want to request more
1807                 than the server negotiated, since it would refuse the read */
1808                 if ((pTcon->ses) &&
1809                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1810                         current_read_size = min_t(const int, current_read_size,
1811                                         pTcon->ses->server->maxBuf - 128);
1812                 }
1813                 rc = -EAGAIN;
1814                 while (rc == -EAGAIN) {
1815                         if (open_file->invalidHandle) {
1816                                 rc = cifs_reopen_file(open_file, true);
1817                                 if (rc != 0)
1818                                         break;
1819                         }
1820                         rc = CIFSSMBRead(xid, pTcon,
1821                                          open_file->netfid,
1822                                          current_read_size, *poffset,
1823                                          &bytes_read, &current_offset,
1824                                          &buf_type);
1825                 }
1826                 if (rc || (bytes_read == 0)) {
1827                         if (total_read) {
1828                                 break;
1829                         } else {
1830                                 FreeXid(xid);
1831                                 return rc;
1832                         }
1833                 } else {
1834                         cifs_stats_bytes_read(pTcon, bytes_read);
1835                         *poffset += bytes_read;
1836                 }
1837         }
1838         FreeXid(xid);
1839         return total_read;
1840 }
1841
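/*
 * Revalidate the file's cached data before handing the mapping off to
 * generic_file_mmap().
 */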
1842 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1843 {
1844         int rc, xid;
1845
1846         xid = GetXid();
1847         rc = cifs_revalidate_file(file);
1848         if (rc) {
1849                 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1850                 FreeXid(xid);
1851                 return rc;
1852         }
1853         rc = generic_file_mmap(file, vma);
1854         FreeXid(xid);
1855         return rc;
1856 }
1857
1858
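/*
 * Copy data just read from the server into the pages on the readahead list:
 * each page is added to the page cache and LRU, filled (the tail of a
 * partial final page is zeroed), marked up to date, unlocked, and handed to
 * FS-Cache.
 */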
1859 static void cifs_copy_cache_pages(struct address_space *mapping,
1860         struct list_head *pages, int bytes_read, char *data)
1861 {
1862         struct page *page;
1863         char *target;
1864
1865         while (bytes_read > 0) {
1866                 if (list_empty(pages))
1867                         break;
1868
1869                 page = list_entry(pages->prev, struct page, lru);
1870                 list_del(&page->lru);
1871
1872                 if (add_to_page_cache_lru(page, mapping, page->index,
1873                                       GFP_KERNEL)) {
1874                         page_cache_release(page);
1875                         cFYI(1, "Add page cache failed");
1876                         data += PAGE_CACHE_SIZE;
1877                         bytes_read -= PAGE_CACHE_SIZE;
1878                         continue;
1879                 }
1880                 page_cache_release(page);
1881
1882                 target = kmap_atomic(page, KM_USER0);
1883
1884                 if (PAGE_CACHE_SIZE > bytes_read) {
1885                         memcpy(target, data, bytes_read);
1886                         /* zero the tail end of this partial page */
1887                         memset(target + bytes_read, 0,
1888                                PAGE_CACHE_SIZE - bytes_read);
1889                         bytes_read = 0;
1890                 } else {
1891                         memcpy(target, data, PAGE_CACHE_SIZE);
1892                         bytes_read -= PAGE_CACHE_SIZE;
1893                 }
1894                 kunmap_atomic(target, KM_USER0);
1895
1896                 flush_dcache_page(page);
1897                 SetPageUptodate(page);
1898                 unlock_page(page);
1899                 data += PAGE_CACHE_SIZE;
1900
1901                 /* add page to FS-Cache */
1902                 cifs_readpage_to_fscache(mapping->host, page);
1903         }
1904         return;
1905 }
1906
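/*
 * ->readpages implementation.  Pages already cached by FS-Cache are read
 * from there first; the remainder are fetched from the server in runs of
 * contiguous pages, each run limited to the negotiated rsize (rounded down
 * to whole pages) and copied into the page cache with
 * cifs_copy_cache_pages().
 */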
1907 static int cifs_readpages(struct file *file, struct address_space *mapping,
1908         struct list_head *page_list, unsigned num_pages)
1909 {
1910         int rc = -EACCES;
1911         int xid;
1912         loff_t offset;
1913         struct page *page;
1914         struct cifs_sb_info *cifs_sb;
1915         struct cifsTconInfo *pTcon;
1916         unsigned int bytes_read = 0;
1917         unsigned int read_size, i;
1918         char *smb_read_data = NULL;
1919         struct smb_com_read_rsp *pSMBr;
1920         struct cifsFileInfo *open_file;
1921         int buf_type = CIFS_NO_BUFFER;
1922
1923         xid = GetXid();
1924         if (file->private_data == NULL) {
1925                 rc = -EBADF;
1926                 FreeXid(xid);
1927                 return rc;
1928         }
1929         open_file = file->private_data;
1930         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1931         pTcon = tlink_tcon(open_file->tlink);
1932
1933         /*
1934          * Reads as many pages as possible from fscache. Returns -ENOBUFS
1935          * immediately if the cookie is negative
1936          */
1937         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
1938                                          &num_pages);
1939         if (rc == 0)
1940                 goto read_complete;
1941
1942         cFYI(DBG2, "rpages: num pages %d", num_pages);
1943         for (i = 0; i < num_pages; ) {
1944                 unsigned contig_pages;
1945                 struct page *tmp_page;
1946                 unsigned long expected_index;
1947
1948                 if (list_empty(page_list))
1949                         break;
1950
1951                 page = list_entry(page_list->prev, struct page, lru);
1952                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1953
1954                 /* count adjacent pages that we will read into */
1955                 contig_pages = 0;
1956                 expected_index =
1957                         list_entry(page_list->prev, struct page, lru)->index;
1958                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1959                         if (tmp_page->index == expected_index) {
1960                                 contig_pages++;
1961                                 expected_index++;
1962                         } else
1963                                 break;
1964                 }
1965                 if (contig_pages + i >  num_pages)
1966                         contig_pages = num_pages - i;
1967
1968                 /* for reads over a certain size we could initiate
1969                    async read ahead */
1970
1971                 read_size = contig_pages * PAGE_CACHE_SIZE;
1972                 /* Read size needs to be a whole multiple of the page size */
1973                 read_size = min_t(const unsigned int, read_size,
1974                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1975                 cFYI(DBG2, "rpages: read size 0x%x  contiguous pages %d",
1976                                 read_size, contig_pages);
1977                 rc = -EAGAIN;
1978                 while (rc == -EAGAIN) {
1979                         if (open_file->invalidHandle) {
1980                                 rc = cifs_reopen_file(open_file, true);
1981                                 if (rc != 0)
1982                                         break;
1983                         }
1984
1985                         rc = CIFSSMBRead(xid, pTcon,
1986                                          open_file->netfid,
1987                                          read_size, offset,
1988                                          &bytes_read, &smb_read_data,
1989                                          &buf_type);
1990                         /* BB more RC checks ? */
1991                         if (rc == -EAGAIN) {
1992                                 if (smb_read_data) {
1993                                         if (buf_type == CIFS_SMALL_BUFFER)
1994                                                 cifs_small_buf_release(smb_read_data);
1995                                         else if (buf_type == CIFS_LARGE_BUFFER)
1996                                                 cifs_buf_release(smb_read_data);
1997                                         smb_read_data = NULL;
1998                                 }
1999                         }
2000                 }
2001                 if ((rc < 0) || (smb_read_data == NULL)) {
2002                         cFYI(1, "Read error in readpages: %d", rc);
2003                         break;
2004                 } else if (bytes_read > 0) {
2005                         task_io_account_read(bytes_read);
2006                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2007                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
2008                                 smb_read_data + 4 /* RFC1001 hdr */ +
2009                                 le16_to_cpu(pSMBr->DataOffset));
2010
2011                         i +=  bytes_read >> PAGE_CACHE_SHIFT;
2012                         cifs_stats_bytes_read(pTcon, bytes_read);
2013                         if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
2014                                 i++; /* account for partial page */
2015
2016                                 /* the server copy of the file can be
2017                                    smaller than the client's */
2018                                 /* BB do we need to verify this common case?
2019                                    this case is ok - if we are at server EOF
2020                                    we will hit it on the next read */
2021
2022                                 /* break; */
2023                         }
2024                 } else {
2025                         cFYI(1, "No bytes read (%d) at offset %lld. "
2026                                 "Cleaning remaining pages from readahead list",
2027                                 bytes_read, offset);
2028                         /* BB turn off caching and do new lookup on
2029                            file size at server? */
2030                         break;
2031                 }
2032                 if (smb_read_data) {
2033                         if (buf_type == CIFS_SMALL_BUFFER)
2034                                 cifs_small_buf_release(smb_read_data);
2035                         else if (buf_type == CIFS_LARGE_BUFFER)
2036                                 cifs_buf_release(smb_read_data);
2037                         smb_read_data = NULL;
2038                 }
2039                 bytes_read = 0;
2040         }
2041
2042 /* need to free smb_read_data buf before exit */
2043         if (smb_read_data) {
2044                 if (buf_type == CIFS_SMALL_BUFFER)
2045                         cifs_small_buf_release(smb_read_data);
2046                 else if (buf_type == CIFS_LARGE_BUFFER)
2047                         cifs_buf_release(smb_read_data);
2048                 smb_read_data = NULL;
2049         }
2050
2051 read_complete:
2052         FreeXid(xid);
2053         return rc;
2054 }
2055
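/*
 * Fill a single page, first trying FS-Cache and falling back to a
 * synchronous cifs_read() from the server.  A short read has its tail
 * zeroed before the page is marked up to date and pushed to FS-Cache.
 */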
2056 static int cifs_readpage_worker(struct file *file, struct page *page,
2057         loff_t *poffset)
2058 {
2059         char *read_data;
2060         int rc;
2061
2062         /* Is the page cached? */
2063         rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2064         if (rc == 0)
2065                 goto read_complete;
2066
2067         page_cache_get(page);
2068         read_data = kmap(page);
2069         /* for reads over a certain size we could initiate async read ahead */
2070
2071         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2072
2073         if (rc < 0)
2074                 goto io_error;
2075         else
2076                 cFYI(1, "Bytes read %d", rc);
2077
2078         file->f_path.dentry->d_inode->i_atime =
2079                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
2080
2081         if (PAGE_CACHE_SIZE > rc)
2082                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2083
2084         flush_dcache_page(page);
2085         SetPageUptodate(page);
2086
2087         /* send this page to the cache */
2088         cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2089
2090         rc = 0;
2091
2092 io_error:
2093         kunmap(page);
2094         page_cache_release(page);
2095
2096 read_complete:
2097         return rc;
2098 }
2099
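/*
 * ->readpage implementation: read the page at its file offset via
 * cifs_readpage_worker() and unlock it when done.
 */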
2100 static int cifs_readpage(struct file *file, struct page *page)
2101 {
2102         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2103         int rc = -EACCES;
2104         int xid;
2105
2106         xid = GetXid();
2107
2108         if (file->private_data == NULL) {
2109                 rc = -EBADF;
2110                 FreeXid(xid);
2111                 return rc;
2112         }
2113
2114         cFYI(1, "readpage %p at offset %d 0x%x\n",
2115                  page, (int)offset, (int)offset);
2116
2117         rc = cifs_readpage_worker(file, page, &offset);
2118
2119         unlock_page(page);
2120
2121         FreeXid(xid);
2122         return rc;
2123 }
2124
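/*
 * Return 1 if at least one file handle for this inode is open for writing,
 * 0 otherwise.
 */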
2125 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2126 {
2127         struct cifsFileInfo *open_file;
2128
2129         spin_lock(&cifs_file_list_lock);
2130         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2131                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2132                         spin_unlock(&cifs_file_list_lock);
2133                         return 1;
2134                 }
2135         }
2136         spin_unlock(&cifs_file_list_lock);
2137         return 0;
2138 }
2139
2140 /* We do not want to update the file size from the server for inodes
2141    open for write, to avoid races with writepage extending the file.
2142    In the future we could consider allowing the inode to be refreshed
2143    only on increases in the file size, but this is tricky to do without
2144    racing with writebehind page caching in the current Linux kernel
2145    design */
2146 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2147 {
2148         if (!cifsInode)
2149                 return true;
2150
2151         if (is_inode_writable(cifsInode)) {
2152                 /* This inode is open for write at least once */
2153                 struct cifs_sb_info *cifs_sb;
2154
2155                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2156                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2157                         /* since there is no page cache to corrupt on
2158                         direct I/O we can change the size safely */
2159                         return true;
2160                 }
2161
2162                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2163                         return true;
2164
2165                 return false;
2166         } else
2167                 return true;
2168 }
2169
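/*
 * ->write_begin implementation.  Grab (or create) the page for the write and
 * decide whether it must be read from the server first: a full-page write
 * needs no read, and with a read oplock a write at or beyond EOF only needs
 * the untouched parts of the page zeroed (PageChecked records that state for
 * cifs_write_end).  Otherwise the page is read in so the copied data can be
 * merged with the existing contents.
 */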
2170 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2171                         loff_t pos, unsigned len, unsigned flags,
2172                         struct page **pagep, void **fsdata)
2173 {
2174         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2175         loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2176         loff_t page_start = pos & PAGE_MASK;
2177         loff_t i_size;
2178         struct page *page;
2179         int rc = 0;
2180
2181         cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2182
2183         page = grab_cache_page_write_begin(mapping, index, flags);
2184         if (!page) {
2185                 rc = -ENOMEM;
2186                 goto out;
2187         }
2188
2189         if (PageUptodate(page))
2190                 goto out;
2191
2192         /*
2193          * If we write a full page it will be up to date, no need to read from
2194          * the server. If the write is short, we'll end up doing a sync write
2195          * instead.
2196          */
2197         if (len == PAGE_CACHE_SIZE)
2198                 goto out;
2199
2200         /*
2201          * optimize away the read when we have an oplock, and we're not
2202          * expecting to use any of the data we'd be reading in. That
2203          * is, when the page lies beyond the EOF, or straddles the EOF
2204          * and the write will cover all of the existing data.
2205          */
2206         if (CIFS_I(mapping->host)->clientCanCacheRead) {
2207                 i_size = i_size_read(mapping->host);
2208                 if (page_start >= i_size ||
2209                     (offset == 0 && (pos + len) >= i_size)) {
2210                         zero_user_segments(page, 0, offset,
2211                                            offset + len,
2212                                            PAGE_CACHE_SIZE);
2213                         /*
2214                          * PageChecked means that the parts of the page
2215                          * to which we're not writing are considered up
2216                          * to date. Once the data is copied to the
2217                          * page, it can be set uptodate.
2218                          */
2219                         SetPageChecked(page);
2220                         goto out;
2221                 }
2222         }
2223
2224         if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2225                 /*
2226                  * might as well read a page, it is fast enough. If we get
2227                  * an error, we don't need to return it. cifs_write_end will
2228                  * do a sync write instead since PG_uptodate isn't set.
2229                  */
2230                 cifs_readpage_worker(file, page, &page_start);
2231         } else {
2232                 /* we could try using another file handle if there is one,
2233                    but how would we lock it to prevent a close of that handle
2234                    racing with this read? In any case this will be written
2235                    out by write_end, so it is fine */
2236         }
2237 out:
2238         *pagep = page;
2239         return rc;
2240 }
2241
2242 static int cifs_release_page(struct page *page, gfp_t gfp)
2243 {
2244         if (PagePrivate(page))
2245                 return 0;
2246
2247         return cifs_fscache_release_page(page, gfp);
2248 }
2249
2250 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2251 {
2252         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2253
2254         if (offset == 0)
2255                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2256 }
2257
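/*
 * Work handler that responds to an oplock break from the server: break any
 * lease held on the inode, flush (and, when read caching is being lost,
 * wait for and invalidate) cached data, then acknowledge the break with an
 * oplock-release lock request unless the break was cancelled.
 */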
2258 void cifs_oplock_break(struct work_struct *work)
2259 {
2260         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2261                                                   oplock_break);
2262         struct inode *inode = cfile->dentry->d_inode;
2263         struct cifsInodeInfo *cinode = CIFS_I(inode);
2264         int rc = 0;
2265
2266         if (inode && S_ISREG(inode->i_mode)) {
2267                 if (cinode->clientCanCacheRead)
2268                         break_lease(inode, O_RDONLY);
2269                 else
2270                         break_lease(inode, O_WRONLY);
2271                 rc = filemap_fdatawrite(inode->i_mapping);
2272                 if (cinode->clientCanCacheRead == 0) {
2273                         rc = filemap_fdatawait(inode->i_mapping);
2274                         mapping_set_error(inode->i_mapping, rc);
2275                         invalidate_remote_inode(inode);
2276                 }
2277                 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2278         }
2279
2280         /*
2281          * Releasing a stale oplock after a recent reconnect of the SMB session,
2282          * using a now incorrect file handle, is not a data integrity issue. But
2283          * do not bother sending an oplock release if the session to the server
2284          * is still disconnected, since the server already released the oplock.
2285          */
2286         if (!cfile->oplock_break_cancelled) {
2287                 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2288                                  0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
2289                 cFYI(1, "Oplock release rc = %d", rc);
2290         }
2291
2292         /*
2293          * We might have kicked in before is_valid_oplock_break()
2294          * finished grabbing reference for us.  Make sure it's done by
2295          * waiting for cifs_file_list_lock.
2296          */
2297         spin_lock(&cifs_file_list_lock);
2298         spin_unlock(&cifs_file_list_lock);
2299
2300         cifs_oplock_break_put(cfile);
2301 }
2302
2303 /* must be called while holding cifs_file_list_lock */
2304 void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2305 {
2306         cifs_sb_active(cfile->dentry->d_sb);
2307         cifsFileInfo_get(cfile);
2308 }
2309
2310 void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2311 {
2312         cifsFileInfo_put(cfile);
2313         cifs_sb_deactive(cfile->dentry->d_sb);
2314 }
2315
2316 const struct address_space_operations cifs_addr_ops = {
2317         .readpage = cifs_readpage,
2318         .readpages = cifs_readpages,
2319         .writepage = cifs_writepage,
2320         .writepages = cifs_writepages,
2321         .write_begin = cifs_write_begin,
2322         .write_end = cifs_write_end,
2323         .set_page_dirty = __set_page_dirty_nobuffers,
2324         .releasepage = cifs_release_page,
2325         .invalidatepage = cifs_invalidate_page,
2326         /* .sync_page = cifs_sync_page, */
2327         /* .direct_IO = */
2328 };
2329
2330 /*
2331  * cifs_readpages requires the server to support a buffer large enough to
2332  * contain the header plus one complete page of data.  Otherwise, we need
2333  * to leave cifs_readpages out of the address space operations.
2334  */
2335 const struct address_space_operations cifs_addr_ops_smallbuf = {
2336         .readpage = cifs_readpage,
2337         .writepage = cifs_writepage,
2338         .writepages = cifs_writepages,
2339         .write_begin = cifs_write_begin,
2340         .write_end = cifs_write_end,
2341         .set_page_dirty = __set_page_dirty_nobuffers,
2342         .releasepage = cifs_release_page,
2343         .invalidatepage = cifs_invalidate_page,
2344         /* .sync_page = cifs_sync_page, */
2345         /* .direct_IO = */
2346 };