/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}
void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
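
/*
 * Note: fc->initialized is set once the INIT reply arrives (or the
 * connection is aborted); fuse_set_initialized() publishes it with an
 * smp_wmb(), and waiters on fc->blocked_waitq pair with it via
 * smp_rmb() after waking up.  fc->blocked additionally throttles
 * background requests once max_background is reached.
 */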
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;

	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
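
/*
 * Allocate the next unique request ID.  fiq->reqctr is only modified
 * with fiq->waitq.lock held, so a plain increment suffices; IDs are
 * 64-bit, so wraparound is not a practical concern.
 */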
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
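
/*
 * Note on the interrupt protocol: interrupted requests sit on
 * fiq->interrupts and are turned into FUSE_INTERRUPT messages on
 * demand by fuse_read_interrupt().  If the daemon answers an
 * interrupt with -EAGAIN (it has not yet seen the original request),
 * the interrupt is simply requeued; see fuse_dev_do_write().
 */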
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
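
/*
 * Example (a sketch for orientation, not part of this file): a typical
 * caller fills in struct fuse_args and lets fuse_simple_request() do
 * the allocation, compat adjustment, sending and teardown.  This
 * mirrors how fuse_do_getattr() in dir.c uses it:
 *
 *	struct fuse_getattr_in inarg;
 *	struct fuse_attr_out outarg;
 *	FUSE_ARGS(args);
 *
 *	memset(&inarg, 0, sizeof(inarg));
 *	memset(&outarg, 0, sizeof(outarg));
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */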
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}
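
/*
 * The three helpers above form a simple cursor over the userspace
 * buffer: fuse_copy_fill() pins and maps the next page (or pipe
 * buffer), fuse_copy_do() advances through it, and fuse_copy_finish()
 * drops the pin.  Callers loop until the argument is fully copied.
 */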
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
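
/*
 * Dequeue up to 'max' forgets from the singly-linked forget list.  The
 * queue keeps a tail pointer so fuse_queue_forget() can append in
 * O(1); once drained, the tail is reset to point back at the head.
 * Called with fiq->waitq.lock held.
 */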
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
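
/*
 * For reference, a sketch of the userspace side (simplified from what
 * libfuse actually does): the daemon reads one request per read() and
 * dispatches on the opcode in the fuse_in_header that starts every
 * message.  dispatch() below is a placeholder for the daemon's opcode
 * handler.
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;	// e.g. ENODEV: filesystem was unmounted
 *		}
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		dispatch(in->opcode, in->unique, buf + sizeof(*in),
 *			 n - sizeof(*in));
 *	}
 */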
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this. Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
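
/*
 * For reference, the userspace side of a reply (a sketch, not part of
 * this file): a fuse_out_header followed by the reply body, written in
 * a single writev() whose total length must equal out.len, echoing the
 * unique ID of the request being answered.
 *
 *	struct fuse_out_header out;
 *	struct iovec iov[2];
 *
 *	out.unique = in_unique;		// ID taken from the request
 *	out.error = 0;
 *	out.len = sizeof(out) + body_len;
 *	iov[0].iov_base = &out;
 *	iov[0].iov_len = sizeof(out);
 *	iov[1].iov_base = body;
 *	iov[1].iov_len = body_len;
 *	writev(fuse_fd, iov, 2);
 */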
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
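
/*
 * Userspace can trigger this through the fuse control filesystem
 * (conventionally mounted at /sys/fs/fuse/connections), for example:
 *
 *	echo 1 > /sys/fs/fuse/connections/<dev>/abort
 *
 * The exact path depends on where fusectl is mounted and on the
 * connection's device number.
 */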
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}

static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}

static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
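
/*
 * For reference, how a multi-threaded daemon clones the device from
 * userspace (a sketch, not part of this file): each worker opens
 * /dev/fuse and attaches it to the session's original descriptor,
 * after which workers read requests and write replies independently.
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);
 *	uint32_t oldfd = session_fd;	// fd from the original mount
 *	ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd);
 */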
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}