/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = current_fsuid();
        req->in.h.gid = current_fsgid();
        req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
        struct fuse_req *req;
        sigset_t oldset;
        int intr;
        int err;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
        err = -EINTR;
        if (intr)
                goto out;

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        req = fuse_request_alloc();
        err = -ENOMEM;
        if (!req)
                goto out;

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        get_file(file);
                        req->stolen_file = file;
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, !fc->blocked);
        req = fuse_request_alloc();
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;

        return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.unique = fuse_get_unique(fc);
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        if (!req->waiting) {
                req->waiting = 1;
                atomic_inc(&fc->num_waiting);
        }
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                queue_request(fc, req);
        }
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
                        wake_up_all(&fc->blocked_waitq);
                }
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        if (signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
        list_add_tail(&req->intr_entry, &fc->interrupts);
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                req->interrupted = 1;
                if (req->state == FUSE_REQ_SENT)
                        queue_interrupt(fc, req);
        }

        if (!req->force) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                /* Request is not yet in userspace, bail out */
                if (req->state == FUSE_REQ_PENDING) {
                        list_del(&req->list);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        spin_unlock(&fc->lock);
        wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);

        if (!req->aborted)
                return;

 aborted:
        BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

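/*
 * Illustrative sketch (the names and the opcode below are
 * placeholders, not taken from this file) of how callers elsewhere
 * in fuse typically drive the request API above; compare the helpers
 * in dir.c and file.c:
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_GETATTR;
 *	req->in.h.nodeid = nodeid;
 *	req->in.numargs = 1;
 *	req->in.args[0].size = sizeof(inarg);
 *	req->in.args[0].value = &inarg;
 *	req->out.numargs = 1;
 *	req->out.args[0].size = sizeof(outarg);
 *	req->out.args[0].value = &outarg;
 *	fuse_request_send(fc, req);		(blocks until the reply)
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);		(drops the caller's reference)
 */
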
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)
{
        req->background = 1;
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        fuse_request_send_nowait(fc, req);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait_locked(fc, req);
}

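/*
 * A worked example of the throttling above, assuming the defaults
 * from fuse_i.h of this era (max_background = 12 and
 * congestion_threshold = 9; both are tunable, so treat the numbers
 * as assumptions): the 9th concurrent background request marks the
 * bdi congested for both sync and async I/O, the 12th sets
 * fc->blocked so that fuse_get_req() sleeps, and request_end()
 * undoes each limit at the same water marks as replies drain the
 * queue.
 */
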
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
        unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (!cs->write) {
                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
                } else {
                        kunmap_atomic(cs->mapaddr, KM_USER0);
                        buf->len = PAGE_SIZE - cs->len;
                }
                cs->currbuf = NULL;
                cs->mapaddr = NULL;
        } else if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = buf->ops->confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        struct page *page;

                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->mapaddr = kmap_atomic(page, KM_USER0);
                        cs->buf = cs->mapaddr;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                if (!cs->seglen) {
                        BUG_ON(!cs->nr_segs);
                        cs->seglen = cs->iov[0].iov_len;
                        cs->addr = (unsigned long) cs->iov[0].iov_base;
                        cs->iov++;
                        cs->nr_segs--;
                }
                err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
                offset = cs->addr % PAGE_SIZE;
                cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
                cs->buf = cs->mapaddr + offset;
                cs->len = min(PAGE_SIZE - offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

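/*
 * For example, copying a 300-byte argument when 100 bytes remain in
 * the currently mapped page (cs->len == 100) copies 100 bytes,
 * returns 100 and leaves *size == 200; the callers below then invoke
 * fuse_copy_fill() to map the next page and loop until *size is zero.
 */
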
static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;
        struct address_space *mapping;
        pgoff_t index;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (buf->ops->steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (WARN_ON(!PageUptodate(newpage)))
                return -EIO;

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        mapping = oldpage->mapping;
        index = oldpage->index;

        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        remove_from_page_cache(oldpage);
        page_cache_release(oldpage);

        err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
        if (err) {
                printk(KERN_WARNING "fuse_try_move_page: failed to add page");
                goto out_fallback_unlock;
        }
        page_cache_get(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->fc->lock);
        if (cs->req->aborted)
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->fc->lock);

        if (err) {
                unlock_page(newpage);
                page_cache_release(newpage);
                return err;
        }

        unlock_page(oldpage);
        page_cache_release(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
        cs->buf = cs->mapaddr + buf->offset;

        err = lock_request(cs->fc, cs->req);
        if (err)
                return err;

        return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        page_cache_get(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++)  {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

static int request_pending(struct fuse_conn *fc)
{
        return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(&fc->lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fc);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}

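/*
 * What the daemon reads for the above (a sketch; both structs are
 * defined in <linux/fuse.h>, and the 48-byte total assumes their
 * current layout):
 *
 *	struct fuse_in_header ih;	ih.opcode == FUSE_INTERRUPT,
 *					ih.unique == newly allocated ID
 *	struct fuse_interrupt_in arg;	arg.unique == ID of the request
 *					to be interrupted
 *
 * A daemon that does not implement interrupts may reply to ih.unique
 * with -ENOSYS, which sets fc->no_interrupt in fuse_dev_do_write().
 */
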
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (!request_pending(fc))
                goto err_unlock;

        if (!list_empty(&fc->interrupts)) {
                req = list_entry(fc->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fc, cs, nbytes, req);
        }

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (req->aborted) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                if (req->interrupted)
                        queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 1, iov, nr_segs);

        return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

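/*
 * A minimal userspace counterpart to this read path (a sketch, not
 * from this file; error handling elided).  Every message begins with
 * a struct fuse_in_header carrying its total length and opcode, and
 * the buffer must be large enough for the biggest possible request:
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *		if (n < 0)
 *			break;			(e.g. ENODEV: unmounted)
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		dispatch(in->opcode, in->unique, buf + sizeof(*in));
 *	}
 *
 * dispatch() is a placeholder for the daemon's opcode switch.
 */
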
static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
                                   struct pipe_buffer *buf)
{
        return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = fuse_dev_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int ret;
        int page_nr = 0;
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(in);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, fc, 1, NULL, 0);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
        if (ret < 0)
                goto out;

        ret = 0;
        pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
                goto out_unlock;
        }

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out_unlock;
        }

        while (page_nr < cs.nr_segs) {
                int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + newbuf;

                buf->page = bufs[page_nr].page;
                buf->offset = bufs[page_nr].offset;
                buf->len = bufs[page_nr].len;
                buf->ops = &fuse_dev_pipe_buf_ops;

                pipe->nrbufs++;
                page_nr++;
                ret += buf->len;

                if (pipe->inode)
                        do_wakeup = 1;
        }

out_unlock:
        pipe_unlock(pipe);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

out:
        for (; page_nr < cs.nr_segs; page_nr++)
                page_cache_release(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}

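/*
 * Userspace reaches this path by splicing from the device into a
 * pipe instead of read()ing into a flat buffer (a sketch; recent
 * libfuse does the real thing when splice support is enabled):
 *
 *	splice(fuse_fd, NULL, pipe_wr, NULL, bufsize, SPLICE_F_MOVE);
 *
 * The request payload then sits in pipe buffers referencing the
 * kernel pages directly, so a daemon can forward it onward (e.g. to
 * a socket) without copying it through its own address space.
 */
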
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);

        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);

        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);

        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}

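/*
 * Notifications are the one message type initiated by the daemon.  A
 * sketch of emitting FUSE_NOTIFY_INVAL_ENTRY from userspace (the
 * write path in fuse_dev_do_write() routes it here because
 * oh.unique == 0, with oh.error carrying the notification code):
 *
 *	struct fuse_notify_inval_entry_out arg = {
 *		.parent  = parent_nodeid,
 *		.namelen = namelen,
 *	};
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(arg) + namelen + 1,
 *		.error  = FUSE_NOTIFY_INVAL_ENTRY,
 *		.unique = 0,
 *	};
 *	struct iovec iov[3] = {
 *		{ &oh, sizeof(oh) },
 *		{ &arg, sizeof(arg) },
 *		{ name, namelen + 1 },		(name plus terminating NUL)
 *	};
 *	writev(fuse_fd, iov, 3);
 */
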
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;

                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
                                 struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_out_header oh;

        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;

        err = -EINVAL;
        if (oh.len != nbytes)
                goto err_finish;

        /*
         * Zero oh.unique indicates unsolicited notification message
         * and error contains notification code.
         */
        if (!oh.unique) {
                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
                return err ? err : nbytes;
        }

        err = -EINVAL;
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        if (!req)
                goto err_unlock;

        if (req->aborted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
                        goto err_unlock;

                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(fc, req);

                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                return nbytes;
        }

        req->state = FUSE_REQ_WRITING;
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs->req = req;
        if (!req->out.page_replace)
                cs->move_pages = 0;
        spin_unlock(&fc->lock);

        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->aborted)
                        err = -ENOENT;
        } else if (!req->aborted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(cs);
        return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, iov, nr_segs);

        return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

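/*
 * The matching userspace side of a reply (a sketch; error handling
 * elided).  oh.unique must echo the request header and oh.len must
 * equal the total number of bytes written, or fuse_dev_do_write()
 * fails the write with -EINVAL:
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(outarg),
 *		.error  = 0,			(or a negated errno)
 *		.unique = in->unique,		(copied from the request)
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ &outarg, sizeof(outarg) },
 *	};
 *	writev(fuse_fd, iov, 2);
 */
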
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)
{
        unsigned nbuf;
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc;
        size_t rem;
        ssize_t ret;

        fc = fuse_get_conn(out);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

        ret = -EINVAL;
        if (rem < len) {
                pipe_unlock(pipe);
                goto out;
        }

        rem = len;
        while (rem) {
                struct pipe_buffer *ibuf;
                struct pipe_buffer *obuf;

                BUG_ON(nbuf >= pipe->buffers);
                BUG_ON(!pipe->nrbufs);
                ibuf = &pipe->bufs[pipe->curbuf];
                obuf = &bufs[nbuf];

                if (rem >= ibuf->len) {
                        *obuf = *ibuf;
                        ibuf->ops = NULL;
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
                        ibuf->ops->get(pipe, ibuf);
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
                        ibuf->offset += obuf->len;
                        ibuf->len -= obuf->len;
                }
                nbuf++;
                rem -= obuf->len;
        }
        pipe_unlock(pipe);

        fuse_copy_init(&cs, fc, 0, NULL, nbuf);
        cs.pipebufs = bufs;
        cs.pipe = pipe;

        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;

        ret = fuse_dev_do_write(fc, &cs, len);

        for (idx = 0; idx < nbuf; idx++) {
                struct pipe_buffer *buf = &bufs[idx];
                buf->ops->release(pipe, buf);
                buf->ops = NULL;
        }
out:
        kfree(bufs);
        return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (request_pending(fc))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->aborted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

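/*
 * In practice this is reached through the fusectl filesystem: with
 * fusectl mounted at /sys/fs/fuse/connections, writing anything to a
 * connection's "abort" file ends up here.  The connection number
 * below is an example:
 *
 *	echo 1 > /sys/fs/fuse/connections/42/abort
 */
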
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write          = do_sync_write,
        .aio_write      = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}