fs/9p: Add direct IO support in cached mode
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Mon, 28 Feb 2011 11:34:04 +0000 (17:04 +0530)
committer Eric Van Hensbergen <ericvh@gmail.com>
Tue, 15 Mar 2011 14:57:40 +0000 (09:57 -0500)
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
fs/9p/vfs_addr.c
fs/9p/vfs_file.c

diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index ee45552..2524e4c 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -254,8 +254,9 @@ static int v9fs_launder_page(struct page *page)
  * with an error.
  *
  */
-ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-               loff_t pos, unsigned long nr_segs)
+static ssize_t
+v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+              loff_t pos, unsigned long nr_segs)
 {
        /*
         * FIXME
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3cff25e..78bcb97 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -579,15 +579,124 @@ out_unlock:
        return VM_FAULT_NOPAGE;
 }
 
+static ssize_t
+v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
+                loff_t *offsetp)
+{
+       loff_t size, offset;
+       struct inode *inode;
+       struct address_space *mapping;
+
+       offset = *offsetp;
+       mapping = filp->f_mapping;
+       inode = mapping->host;
+       if (!count)
+               return 0;
+       size = i_size_read(inode);
+       if (offset < size)
+               filemap_write_and_wait_range(mapping, offset,
+                                            offset + count - 1);
+
+       return v9fs_file_read(filp, udata, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_read - read from a file
+ * @filp: file pointer to read
+ * @data: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+static ssize_t
+v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
+                     loff_t *offset)
+{
+       if (filp->f_flags & O_DIRECT)
+               return v9fs_direct_read(filp, data, count, offset);
+       return do_sync_read(filp, data, count, offset);
+}
+
+static ssize_t
+v9fs_direct_write(struct file *filp, const char __user *data,
+                 size_t count, loff_t *offsetp)
+{
+       loff_t offset;
+       ssize_t retval;
+       struct inode *inode;
+       struct address_space *mapping;
+
+       offset = *offsetp;
+       mapping = filp->f_mapping;
+       inode = mapping->host;
+       if (!count)
+               return 0;
+
+       mutex_lock(&inode->i_mutex);
+       retval = filemap_write_and_wait_range(mapping, offset,
+                                             offset + count - 1);
+       if (retval)
+               goto err_out;
+       /*
+        * After a write we want buffered reads to be sure to go to disk to
+        * get the new data.  We invalidate clean cached pages from the
+        * region we're about to write.  We do this *before* the write so
+        * that if we fail here we fall back to a buffered write.
+        */
+       if (mapping->nrpages) {
+               pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
+               pgoff_t pg_end   = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+
+               retval = invalidate_inode_pages2_range(mapping,
+                                                       pg_start, pg_end);
+               /*
+                * If a page cannot be invalidated, fall back
+                * to a buffered write.
+                */
+               if (retval) {
+                       if (retval == -EBUSY)
+                               goto buff_write;
+                       goto err_out;
+               }
+       }
+       retval = v9fs_file_write(filp, data, count, offsetp);
+err_out:
+       mutex_unlock(&inode->i_mutex);
+       return retval;
+
+buff_write:
+       mutex_unlock(&inode->i_mutex);
+       return do_sync_write(filp, data, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_cached_file_write(struct file *filp, const char __user *data,
+                      size_t count, loff_t *offset)
+{
+
+       if (filp->f_flags & O_DIRECT)
+               return v9fs_direct_write(filp, data, count, offset);
+       return do_sync_write(filp, data, count, offset);
+}
+
 static const struct vm_operations_struct v9fs_file_vm_ops = {
        .fault = filemap_fault,
        .page_mkwrite = v9fs_vm_page_mkwrite,
 };
 
+
 const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
-       .read = do_sync_read,
-       .write = do_sync_write,
+       .read = v9fs_cached_file_read,
+       .write = v9fs_cached_file_write,
        .aio_read = generic_file_aio_read,
        .aio_write = generic_file_aio_write,
        .open = v9fs_file_open,
@@ -599,8 +708,8 @@ const struct file_operations v9fs_cached_file_operations = {
 
 const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
-       .read = do_sync_read,
-       .write = do_sync_write,
+       .read = v9fs_cached_file_read,
+       .write = v9fs_cached_file_write,
        .aio_read = generic_file_aio_read,
        .aio_write = generic_file_aio_write,
        .open = v9fs_file_open,