struct fuse_file *ff = file->private_data;
do {
- wait_event(fc->blocked_waitq, ff->reserved_req);
+ wait_event(fc->reserved_req_waitq, ff->reserved_req);
spin_lock(&fc->lock);
if (ff->reserved_req) {
req = ff->reserved_req;
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
fuse_request_init(req);
BUG_ON(ff->reserved_req);
ff->reserved_req = req;
- wake_up(&fc->blocked_waitq);
+ wake_up_all(&fc->reserved_req_waitq);
spin_unlock(&fc->lock);
fput(file);
}
}
}
+static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+{
+ unsigned nbytes = 0;
+ unsigned i;
+
+ for (i = 0; i < numargs; i++)
+ nbytes += args[i].size;
+
+ return nbytes;
+}
+
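+/* Allocate the next unique request ID (zero is never handed out) */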
+static u64 fuse_get_unique(struct fuse_conn *fc)
+{
+ fc->reqctr++;
+ /* zero is special */
+ if (fc->reqctr == 0)
+ fc->reqctr = 1;
+
+ return fc->reqctr;
+}
+
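+/*
+ * Fill in the request header, add the request to the pending list
+ * and wake up any waiting readers of the device
+ */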
+static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+ req->in.h.unique = fuse_get_unique(fc);
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ list_add_tail(&req->list, &fc->pending);
+ req->state = FUSE_REQ_PENDING;
+ if (!req->waiting) {
+ req->waiting = 1;
+ atomic_inc(&fc->num_waiting);
+ }
+ wake_up(&fc->waitq);
+ kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
+
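+/*
+ * Move queued background requests to the pending list, as long as
+ * fewer than FUSE_MAX_BACKGROUND of them are active
+ */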
+static void flush_bg_queue(struct fuse_conn *fc)
+{
+ while (fc->active_background < FUSE_MAX_BACKGROUND &&
+ !list_empty(&fc->bg_queue)) {
+ struct fuse_req *req;
+
+ req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+ list_del(&req->list);
+ fc->active_background++;
+ queue_request(fc, req);
+ }
+}
+
/*
* This function is called when a request is finished. Either a reply
* has arrived or it was aborted (and not yet sent) or some error
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fc->blocked = 0;
wake_up_all(&fc->blocked_waitq);
}
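+ /* Dropping back below the congestion threshold */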
+ if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+ clear_bdi_congested(&fc->bdi, READ);
+ clear_bdi_congested(&fc->bdi, WRITE);
+ }
fc->num_background--;
+ fc->active_background--;
+ flush_bg_queue(fc);
}
spin_unlock(&fc->lock);
- dput(req->dentry);
- mntput(req->vfsmount);
- if (req->file)
- fput(req->file);
wake_up(&req->waitq);
if (end)
end(fc, req);
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
queue_interrupt(fc, req);
}
- if (req->force) {
- spin_unlock(&fc->lock);
- wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
- spin_lock(&fc->lock);
- } else {
+ if (!req->force) {
sigset_t oldset;
/* Only fatal signals may interrupt this */
block_sigs(&oldset);
wait_answer_interruptible(fc, req);
restore_sigs(&oldset);
+
+ if (req->aborted)
+ goto aborted;
+ if (req->state == FUSE_REQ_FINISHED)
+ return;
+
+ /* Request is not yet in userspace, bail out */
+ if (req->state == FUSE_REQ_PENDING) {
+ list_del(&req->list);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return;
+ }
}
- if (req->aborted)
- goto aborted;
- if (req->state == FUSE_REQ_FINISHED)
- return;
+ /*
+ * Either request is already in userspace, or it was forced.
+ * Wait it out.
+ */
+ spin_unlock(&fc->lock);
+ wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+ spin_lock(&fc->lock);
- req->out.h.error = -EINTR;
- req->aborted = 1;
+ if (!req->aborted)
+ return;
aborted:
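+ /* An aborted request must already be in the finished state */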
+ BUG_ON(req->state != FUSE_REQ_FINISHED);
if (req->locked) {
/* This is uninterruptible sleep, because data is
being copied to/from the buffers of req. During
locked state, there mustn't be any filesystem
operation (e.g. page fault), since that could lead
to deadlock */
spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
spin_lock(&fc->lock);
}
- if (req->state == FUSE_REQ_PENDING) {
- list_del(&req->list);
- __fuse_put_request(req);
- } else if (req->state == FUSE_REQ_SENT) {
- spin_unlock(&fc->lock);
- wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
- spin_lock(&fc->lock);
- }
-}
-
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
-{
- unsigned nbytes = 0;
- unsigned i;
-
- for (i = 0; i < numargs; i++)
- nbytes += args[i].size;
-
- return nbytes;
-}
-
-static u64 fuse_get_unique(struct fuse_conn *fc)
-{
- fc->reqctr++;
- /* zero is special */
- if (fc->reqctr == 0)
- fc->reqctr = 1;
-
- return fc->reqctr;
-}
-
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
-{
- req->in.h.unique = fuse_get_unique(fc);
- req->in.h.len = sizeof(struct fuse_in_header) +
- len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
- list_add_tail(&req->list, &fc->pending);
- req->state = FUSE_REQ_PENDING;
- if (!req->waiting) {
- req->waiting = 1;
- atomic_inc(&fc->num_waiting);
- }
- wake_up(&fc->waitq);
- kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
void request_send(struct fuse_conn *fc, struct fuse_req *req)
spin_unlock(&fc->lock);
}
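+/*
+ * Queue a request for background processing.  When FUSE_MAX_BACKGROUND
+ * is reached the connection is marked blocked, and when
+ * FUSE_CONGESTION_THRESHOLD is reached the bdi is marked congested.
+ * Called with fc->lock held.
+ */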
+static void request_send_nowait_locked(struct fuse_conn *fc,
+ struct fuse_req *req)
+{
+ req->background = 1;
+ fc->num_background++;
+ if (fc->num_background == FUSE_MAX_BACKGROUND)
+ fc->blocked = 1;
+ if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+ set_bdi_congested(&fc->bdi, READ);
+ set_bdi_congested(&fc->bdi, WRITE);
+ }
+ list_add_tail(&req->list, &fc->bg_queue);
+ flush_bg_queue(fc);
+}
+
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
spin_lock(&fc->lock);
if (fc->connected) {
- req->background = 1;
- fc->num_background++;
- if (fc->num_background == FUSE_MAX_BACKGROUND)
- fc->blocked = 1;
-
- queue_request(fc, req);
+ request_send_nowait_locked(fc, req);
spin_unlock(&fc->lock);
} else {
req->out.h.error = -ENOTCONN;
fuse_copy_finish(&cs);
spin_lock(&fc->lock);
req->locked = 0;
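+ /* The request was aborted while we were copying it: finish it off now */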
- if (!err && req->aborted)
- err = -ENOENT;
+ if (req->aborted) {
+ request_end(fc, req);
+ return -ENODEV;
+ }
if (err) {
- if (!req->aborted)
- req->out.h.error = -EIO;
+ req->out.h.error = -EIO;
request_end(fc, req);
return err;
}