fuse: implement poll support
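
The interesting piece on the kernel side is the unsolicited-notification path added to fuse_dev_write() below: a write to /dev/fuse whose fuse_out_header has unique == 0 is treated as a notification rather than a reply, the notification code travels in the error field, and len must still equal the total number of bytes written. As a rough illustration of how a userspace server might use this for FUSE_NOTIFY_POLL (a sketch only, not part of this patch: the helper name notify_poll_wakeup, the fuse_fd descriptor and the kh poll handle are assumptions from the server's own state, and <linux/fuse.h> is assumed recent enough to define FUSE_NOTIFY_POLL and struct fuse_notify_poll_wakeup_out):

#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fuse.h>

/* Sketch: wake up pollers of the file identified by poll handle 'kh'. */
static int notify_poll_wakeup(int fuse_fd, uint64_t kh)
{
	struct fuse_out_header oh;
	struct fuse_notify_poll_wakeup_out arg;
	struct iovec iov[2] = {
		{ .iov_base = &oh,  .iov_len = sizeof(oh)  },
		{ .iov_base = &arg, .iov_len = sizeof(arg) },
	};

	oh.unique = 0;                  /* zero marks a notification       */
	oh.error  = FUSE_NOTIFY_POLL;   /* notification code, not an errno */
	oh.len    = sizeof(oh) + sizeof(arg);
	arg.kh    = kh;

	return writev(fuse_fd, iov, 2) < 0 ? -1 : 0;
}

On the kernel side such a write is dispatched by fuse_notify() on the code carried in oh.error and ends up in fuse_notify_poll_wakeup().
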
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index af63980..6176e44 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -47,6 +47,14 @@ struct fuse_req *fuse_request_alloc(void)
        return req;
 }
 
+struct fuse_req *fuse_request_alloc_nofs(void)
+{
+       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
+       if (req)
+               fuse_request_init(req);
+       return req;
+}
+
 void fuse_request_free(struct fuse_req *req)
 {
        kmem_cache_free(fuse_req_cachep, req);
@@ -285,12 +293,12 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
-       else
-               fuse_put_request(fc, req);
+       fuse_put_request(fc, req);
 }
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -307,8 +315,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
-/* Called with fc->lock held.  Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -429,6 +437,17 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
        request_send_nowait(fc, req);
 }
 
+/*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
+{
+       req->isreply = 1;
+       request_send_nowait_locked(fc, req);
+}
+
 /*
  * Lock the request.  Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault.  If the request was already
@@ -519,8 +538,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
-               cs->iov ++;
-               cs->nr_segs --;
+               cs->iov++;
+               cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
@@ -569,9 +588,11 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
-               int err;
-               if (!cs->len && (err = fuse_copy_fill(cs)))
-                       return err;
+               if (!cs->len) {
+                       int err = fuse_copy_fill(cs);
+                       if (err)
+                               return err;
+               }
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
@@ -611,9 +632,11 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
 {
        while (size) {
-               int err;
-               if (!cs->len && (err = fuse_copy_fill(cs)))
-                       return err;
+               if (!cs->len) {
+                       int err = fuse_copy_fill(cs);
+                       if (err)
+                               return err;
+               }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
@@ -793,6 +816,34 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
        return err;
 }
 
+static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
+                           struct fuse_copy_state *cs)
+{
+       struct fuse_notify_poll_wakeup_out outarg;
+       int err;
+
+       if (size != sizeof(outarg))
+               return -EINVAL;
+
+       err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+       if (err)
+               return err;
+
+       return fuse_notify_poll_wakeup(fc, &outarg);
+}
+
+static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
+                      unsigned int size, struct fuse_copy_state *cs)
+{
+       switch (code) {
+       case FUSE_NOTIFY_POLL:
+               return fuse_notify_poll(fc, size, cs);
+
+       default:
+               return -EINVAL;
+       }
+}
+
 /* Look up request on processing list by unique ID */
 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
 {
@@ -856,9 +907,23 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
+
+       err = -EINVAL;
+       if (oh.len != nbytes)
+               goto err_finish;
+
+       /*
+        * Zero oh.unique indicates unsolicited notification message
+        * and error contains notification code.
+        */
+       if (!oh.unique) {
+               err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
+               fuse_copy_finish(&cs);
+               return err ? err : nbytes;
+       }
+
        err = -EINVAL;
-       if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
-           oh.len != nbytes)
+       if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;
 
        spin_lock(&fc->lock);
@@ -968,6 +1033,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -981,11 +1047,11 @@ static void end_io_requests(struct fuse_conn *fc)
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
-                       /* The end function will consume this reference */
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
+                       fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
@@ -1035,7 +1101,6 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
-               fasync_helper(-1, file, 0, &fc->fasync);
                fuse_conn_put(fc);
        }