aio: switch ki_eventfd handling to eventfd_ctx_fdget()/eventfd_ctx_put()
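
The hunks below (a partial view of the fs/aio.c side of the change) move the AIO completion eventfd from a struct file reference taken with eventfd_fget() to an eventfd_ctx reference taken with eventfd_ctx_fdget(): io_submit_one() now pins the eventfd context at submission time, really_put_req() drops it with eventfd_ctx_put(), and __aio_put_req() loses the schedule_putreq bookkeeping that used to defer the eventfd fput to the aio_fput_routine() workqueue. A hedged usage sketch of the resulting lifecycle follows the diff.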
diff --git a/fs/aio.c b/fs/aio.c
index 76da125..d065b2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 {
        assert_spin_locked(&ctx->ctx_lock);
 
+       if (req->ki_eventfd != NULL)
+               eventfd_ctx_put(req->ki_eventfd);
        if (req->ki_dtor)
                req->ki_dtor(req);
        if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
                /* Complete the fput(s) */
                if (req->ki_filp != NULL)
                        __fput(req->ki_filp);
-               if (req->ki_eventfd != NULL)
-                       __fput(req->ki_eventfd);
 
                /* Link the iocb into the context's free list */
                spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
  */
 static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 {
-       int schedule_putreq = 0;
-
        dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
                req, atomic_long_read(&req->ki_filp->f_count));
 
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         * we would not be holding the last reference to the file*, so
         * this function will be executed w/out any aio kthread wakeup.
         */
-       if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
-               schedule_putreq++;
-       else
-               req->ki_filp = NULL;
-       if (req->ki_eventfd != NULL) {
-               if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
-                       schedule_putreq++;
-               else
-                       req->ki_eventfd = NULL;
-       }
-       if (unlikely(schedule_putreq)) {
+       if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
                get_ioctx(ctx);
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
                spin_unlock(&fput_lock);
                queue_work(aio_wq, &fput_work);
-       } else
+       } else {
+               req->ki_filp = NULL;
                really_put_req(ctx, req);
+       }
        return 1;
 }
 
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
-               req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+               req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
                if (IS_ERR(req->ki_eventfd)) {
                        ret = PTR_ERR(req->ki_eventfd);
                        req->ki_eventfd = NULL;
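
For readers unfamiliar with the eventfd_ctx API, here is a minimal kernel-style sketch of the lifecycle the patch adopts. It assumes the eventfd interface of this kernel generation (eventfd_ctx_fdget(), eventfd_signal(ctx, n), eventfd_ctx_put()); struct my_req and the my_* helpers are hypothetical names used only for illustration and do not appear in the patch.

/*
 * Illustrative sketch only -- not part of the patch. The struct and the
 * my_* helpers are made-up names; eventfd_ctx_fdget(), eventfd_signal()
 * and eventfd_ctx_put() are the real eventfd APIs adopted above.
 */
#include <linux/err.h>
#include <linux/eventfd.h>

struct my_req {
	struct eventfd_ctx *ev;		/* plays the role of kiocb->ki_eventfd */
};

/* Submission path: resolve the user-supplied eventfd fd once and pin its
 * context, as io_submit_one() now does with eventfd_ctx_fdget(). */
static int my_req_setup(struct my_req *req, int resfd)
{
	req->ev = eventfd_ctx_fdget(resfd);
	if (IS_ERR(req->ev)) {
		int ret = PTR_ERR(req->ev);

		req->ev = NULL;
		return ret;
	}
	return 0;
}

/* Completion path: signal the eventfd counter; no struct file is involved. */
static void my_req_complete(struct my_req *req)
{
	if (req->ev != NULL)
		eventfd_signal(req->ev, 1);
}

/* Teardown path: eventfd_ctx_put() drops a reference on the context rather
 * than doing a full fput(), which is why really_put_req() can now call it
 * directly under ctx->ctx_lock instead of deferring to a workqueue. */
static void my_req_free(struct my_req *req)
{
	if (req->ev != NULL) {
		eventfd_ctx_put(req->ev);
		req->ev = NULL;
	}
}

The net effect visible in the hunks: only ki_filp still needs the deferred-fput machinery in aio_fput_routine(), while the eventfd reference is released synchronously in really_put_req().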