1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2008  Miklos Szeredi <[email protected]>
4
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
19 #include <linux/pipe_fs_i.h>
20 #include <linux/swap.h>
21 #include <linux/splice.h>
22
23 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
24 MODULE_ALIAS("devname:fuse");
25
26 static struct kmem_cache *fuse_req_cachep;
27
28 static struct fuse_conn *fuse_get_conn(struct file *file)
29 {
30         /*
31          * Lockless access is OK, because file->private_data is set
32          * once during mount and is valid until the file is released.
33          */
34         return file->private_data;
35 }
36
37 static void fuse_request_init(struct fuse_req *req, struct page **pages,
38                               struct fuse_page_desc *page_descs,
39                               unsigned npages)
40 {
41         memset(req, 0, sizeof(*req));
42         memset(pages, 0, sizeof(*pages) * npages);
43         memset(page_descs, 0, sizeof(*page_descs) * npages);
44         INIT_LIST_HEAD(&req->list);
45         INIT_LIST_HEAD(&req->intr_entry);
46         init_waitqueue_head(&req->waitq);
47         atomic_set(&req->count, 1);
48         req->pages = pages;
49         req->page_descs = page_descs;
50         req->max_pages = npages;
51 }
52
53 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
54 {
55         struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
56         if (req) {
57                 struct page **pages;
58                 struct fuse_page_desc *page_descs;
59
60                 if (npages <= FUSE_REQ_INLINE_PAGES) {
61                         pages = req->inline_pages;
62                         page_descs = req->inline_page_descs;
63                 } else {
64                         pages = kmalloc(sizeof(struct page *) * npages, flags);
65                         page_descs = kmalloc(sizeof(struct fuse_page_desc) *
66                                              npages, flags);
67                 }
68
69                 if (!pages || !page_descs) {
70                         kfree(pages);
71                         kfree(page_descs);
72                         kmem_cache_free(fuse_req_cachep, req);
73                         return NULL;
74                 }
75
76                 fuse_request_init(req, pages, page_descs, npages);
77         }
78         return req;
79 }
80
81 struct fuse_req *fuse_request_alloc(unsigned npages)
82 {
83         return __fuse_request_alloc(npages, GFP_KERNEL);
84 }
85 EXPORT_SYMBOL_GPL(fuse_request_alloc);
86
87 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
88 {
89         return __fuse_request_alloc(npages, GFP_NOFS);
90 }
91
92 void fuse_request_free(struct fuse_req *req)
93 {
94         if (req->pages != req->inline_pages) {
95                 kfree(req->pages);
96                 kfree(req->page_descs);
97         }
98         kmem_cache_free(fuse_req_cachep, req);
99 }
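/*
 * Editor's sketch (hypothetical caller, not part of this file): how the
 * allocator pair above is used.  Requests covering at most
 * FUSE_REQ_INLINE_PAGES pages reuse the arrays embedded in struct
 * fuse_req, so only larger requests pay for the two extra kmalloc()s:
 *
 *	struct fuse_req *req = fuse_request_alloc(1);
 *
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in req->in, req->pages[0], req->page_descs[0] ...
 *	fuse_request_free(req);
 */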
100
101 static void block_sigs(sigset_t *oldset)
102 {
103         sigset_t mask;
104
105         siginitsetinv(&mask, sigmask(SIGKILL));
106         sigprocmask(SIG_BLOCK, &mask, oldset);
107 }
108
109 static void restore_sigs(sigset_t *oldset)
110 {
111         sigprocmask(SIG_SETMASK, oldset, NULL);
112 }
113
114 void __fuse_get_request(struct fuse_req *req)
115 {
116         atomic_inc(&req->count);
117 }
118
119 /* Must be called with > 1 refcount */
120 static void __fuse_put_request(struct fuse_req *req)
121 {
122         BUG_ON(atomic_read(&req->count) < 2);
123         atomic_dec(&req->count);
124 }
125
126 static void fuse_req_init_context(struct fuse_req *req)
127 {
128         req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
129         req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
130         req->in.h.pid = current->pid;
131 }
132
133 void fuse_set_initialized(struct fuse_conn *fc)
134 {
135         /* Make sure stores before this are seen on another CPU */
136         smp_wmb();
137         fc->initialized = 1;
138 }
139
140 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
141 {
142         return !fc->initialized || (for_background && fc->blocked);
143 }
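/*
 * Editor's note, a schematic of the barrier pairing (not literal code):
 * fuse_set_initialized() publishes fc->initialized after all INIT-time
 * stores; a reader that sees it set must issue smp_rmb() before trusting
 * those earlier stores, as __fuse_get_req() does below.
 *
 *	CPU0 (INIT reply)		CPU1 (request allocation)
 *	fc->minor = ...;		if (fc->initialized) {
 *	smp_wmb();				smp_rmb();
 *	fc->initialized = 1;			use fc->minor, ...;
 *					}
 */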
144
145 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
146                                        bool for_background)
147 {
148         struct fuse_req *req;
149         int err;
150         atomic_inc(&fc->num_waiting);
151
152         if (fuse_block_alloc(fc, for_background)) {
153                 sigset_t oldset;
154                 int intr;
155
156                 block_sigs(&oldset);
157                 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
158                                 !fuse_block_alloc(fc, for_background));
159                 restore_sigs(&oldset);
160                 err = -EINTR;
161                 if (intr)
162                         goto out;
163         }
164         /* Matches smp_wmb() in fuse_set_initialized() */
165         smp_rmb();
166
167         err = -ENOTCONN;
168         if (!fc->connected)
169                 goto out;
170
171         err = -ECONNREFUSED;
172         if (fc->conn_error)
173                 goto out;
174
175         req = fuse_request_alloc(npages);
176         err = -ENOMEM;
177         if (!req) {
178                 if (for_background)
179                         wake_up(&fc->blocked_waitq);
180                 goto out;
181         }
182
183         fuse_req_init_context(req);
184         __set_bit(FR_WAITING, &req->flags);
185         if (for_background)
186                 __set_bit(FR_BACKGROUND, &req->flags);
187
188         return req;
189
190  out:
191         atomic_dec(&fc->num_waiting);
192         return ERR_PTR(err);
193 }
194
195 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
196 {
197         return __fuse_get_req(fc, npages, false);
198 }
199 EXPORT_SYMBOL_GPL(fuse_get_req);
200
201 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
202                                              unsigned npages)
203 {
204         return __fuse_get_req(fc, npages, true);
205 }
206 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
207
208 /*
209  * Return request in fuse_file->reserved_req.  However that may
210  * currently be in use.  If that is the case, wait for it to become
211  * available.
212  */
213 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
214                                          struct file *file)
215 {
216         struct fuse_req *req = NULL;
217         struct fuse_file *ff = file->private_data;
218
219         do {
220                 wait_event(fc->reserved_req_waitq, ff->reserved_req);
221                 spin_lock(&fc->lock);
222                 if (ff->reserved_req) {
223                         req = ff->reserved_req;
224                         ff->reserved_req = NULL;
225                         req->stolen_file = get_file(file);
226                 }
227                 spin_unlock(&fc->lock);
228         } while (!req);
229
230         return req;
231 }
232
233 /*
234  * Put stolen request back into fuse_file->reserved_req
235  */
236 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
237 {
238         struct file *file = req->stolen_file;
239         struct fuse_file *ff = file->private_data;
240
241         spin_lock(&fc->lock);
242         fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
243         BUG_ON(ff->reserved_req);
244         ff->reserved_req = req;
245         wake_up_all(&fc->reserved_req_waitq);
246         spin_unlock(&fc->lock);
247         fput(file);
248 }
249
250 /*
251  * Gets a request for a file operation, always succeeds
252  *
253  * This is used for sending the FLUSH request, which must get to
254  * userspace, due to POSIX locks which may need to be unlocked.
255  *
256  * If allocation fails due to OOM, use the reserved request in
257  * fuse_file.
258  *
259  * This is very unlikely to deadlock accidentally, since the
260  * filesystem should not have its own file open.  If deadlock is
261  * intentional, it can still be broken by "aborting" the filesystem.
262  */
263 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
264                                              struct file *file)
265 {
266         struct fuse_req *req;
267
268         atomic_inc(&fc->num_waiting);
269         wait_event(fc->blocked_waitq, fc->initialized);
270         /* Matches smp_wmb() in fuse_set_initialized() */
271         smp_rmb();
272         req = fuse_request_alloc(0);
273         if (!req)
274                 req = get_reserved_req(fc, file);
275
276         fuse_req_init_context(req);
277         __set_bit(FR_WAITING, &req->flags);
278         __clear_bit(FR_BACKGROUND, &req->flags);
279         return req;
280 }
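/*
 * Editor's sketch, condensed from the FLUSH path in file.c (details
 * abbreviated and illustrative, not verified against that file):
 *
 *	req = fuse_get_req_nofail_nopages(fc, file);
 *	req->in.h.opcode = FUSE_FLUSH;
 *	req->in.h.nodeid = get_node_id(inode);
 *	req->in.numargs = 1;
 *	req->in.args[0].size = sizeof(inarg);
 *	req->in.args[0].value = &inarg;
 *	__set_bit(FR_FORCE, &req->flags);
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */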
281
282 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
283 {
284         if (atomic_dec_and_test(&req->count)) {
285                 if (test_bit(FR_BACKGROUND, &req->flags)) {
286                         /*
287                          * We get here in the unlikely case that a background
288                          * request was allocated but not sent
289                          */
290                         spin_lock(&fc->lock);
291                         if (!fc->blocked)
292                                 wake_up(&fc->blocked_waitq);
293                         spin_unlock(&fc->lock);
294                 }
295
296                 if (test_bit(FR_WAITING, &req->flags)) {
297                         __clear_bit(FR_WAITING, &req->flags);
298                         atomic_dec(&fc->num_waiting);
299                 }
300
301                 if (req->stolen_file)
302                         put_reserved_req(fc, req);
303                 else
304                         fuse_request_free(req);
305         }
306 }
307 EXPORT_SYMBOL_GPL(fuse_put_request);
308
309 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
310 {
311         unsigned nbytes = 0;
312         unsigned i;
313
314         for (i = 0; i < numargs; i++)
315                 nbytes += args[i].size;
316
317         return nbytes;
318 }
319
320 static u64 fuse_get_unique(struct fuse_conn *fc)
321 {
322         fc->reqctr++;
323         /* zero is special */
324         if (fc->reqctr == 0)
325                 fc->reqctr = 1;
326
327         return fc->reqctr;
328 }
329
330 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
331 {
332         req->in.h.len = sizeof(struct fuse_in_header) +
333                 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
334         list_add_tail(&req->list, &fc->pending);
335         req->state = FUSE_REQ_PENDING;
336         wake_up(&fc->waitq);
337         kill_fasync(&fc->fasync, SIGIO, POLL_IN);
338 }
339
340 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
341                        u64 nodeid, u64 nlookup)
342 {
343         forget->forget_one.nodeid = nodeid;
344         forget->forget_one.nlookup = nlookup;
345
346         spin_lock(&fc->lock);
347         if (fc->connected) {
348                 fc->forget_list_tail->next = forget;
349                 fc->forget_list_tail = forget;
350                 wake_up(&fc->waitq);
351                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
352         } else {
353                 kfree(forget);
354         }
355         spin_unlock(&fc->lock);
356 }
357
358 static void flush_bg_queue(struct fuse_conn *fc)
359 {
360         while (fc->active_background < fc->max_background &&
361                !list_empty(&fc->bg_queue)) {
362                 struct fuse_req *req;
363
364                 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
365                 list_del(&req->list);
366                 fc->active_background++;
367                 req->in.h.unique = fuse_get_unique(fc);
368                 queue_request(fc, req);
369         }
370 }
371
372 /*
373  * This function is called when a request is finished.  Either a reply
374  * has arrived or it was aborted (and not yet sent) or some error
375  * occurred during communication with userspace, or the device file
376  * was closed.  The requester thread is woken up (if still waiting),
377  * the 'end' callback is called if given, else the reference to the
378  * request is released.
379  *
380  * Called with fc->lock, unlocks it
381  */
382 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
383 __releases(fc->lock)
384 {
385         void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
386         req->end = NULL;
387         list_del_init(&req->list);
388         list_del_init(&req->intr_entry);
389         req->state = FUSE_REQ_FINISHED;
390         if (test_bit(FR_BACKGROUND, &req->flags)) {
391                 clear_bit(FR_BACKGROUND, &req->flags);
392                 if (fc->num_background == fc->max_background)
393                         fc->blocked = 0;
394
395                 /* Wake up next waiter, if any */
396                 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
397                         wake_up(&fc->blocked_waitq);
398
399                 if (fc->num_background == fc->congestion_threshold &&
400                     fc->connected && fc->bdi_initialized) {
401                         clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
402                         clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
403                 }
404                 fc->num_background--;
405                 fc->active_background--;
406                 flush_bg_queue(fc);
407         }
408         spin_unlock(&fc->lock);
409         wake_up(&req->waitq);
410         if (end)
411                 end(fc, req);
412         fuse_put_request(fc, req);
413 }
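/*
 * Editor's note: the 'end' callback runs after the request is off all
 * lists and fc->lock has been dropped.  fuse_retrieve_end() later in this
 * file is a real instance; a minimal hypothetical one would look like:
 *
 *	static void example_end(struct fuse_conn *fc, struct fuse_req *req)
 *	{
 *		release_pages(req->pages, req->num_pages, false);
 *	}
 *
 * installed with req->end = example_end before the request is queued.
 */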
414
415 static void wait_answer_interruptible(struct fuse_conn *fc,
416                                       struct fuse_req *req)
417 __releases(fc->lock)
418 __acquires(fc->lock)
419 {
420         if (signal_pending(current))
421                 return;
422
423         spin_unlock(&fc->lock);
424         wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
425         spin_lock(&fc->lock);
426 }
427
428 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
429 {
430         list_add_tail(&req->intr_entry, &fc->interrupts);
431         wake_up(&fc->waitq);
432         kill_fasync(&fc->fasync, SIGIO, POLL_IN);
433 }
434
435 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
436 __releases(fc->lock)
437 __acquires(fc->lock)
438 {
439         if (!fc->no_interrupt) {
440                 /* Any signal may interrupt this */
441                 wait_answer_interruptible(fc, req);
442
443                 if (req->state == FUSE_REQ_FINISHED)
444                         return;
445
446                 set_bit(FR_INTERRUPTED, &req->flags);
447                 if (req->state == FUSE_REQ_SENT)
448                         queue_interrupt(fc, req);
449         }
450
451         if (!test_bit(FR_FORCE, &req->flags)) {
452                 sigset_t oldset;
453
454                 /* Only fatal signals may interrupt this */
455                 block_sigs(&oldset);
456                 wait_answer_interruptible(fc, req);
457                 restore_sigs(&oldset);
458
459                 if (req->state == FUSE_REQ_FINISHED)
460                         return;
461
462                 /* Request is not yet in userspace, bail out */
463                 if (req->state == FUSE_REQ_PENDING) {
464                         list_del(&req->list);
465                         __fuse_put_request(req);
466                         req->out.h.error = -EINTR;
467                         return;
468                 }
469         }
470
471         /*
472          * Either request is already in userspace, or it was forced.
473          * Wait it out.
474          */
475         spin_unlock(&fc->lock);
476         wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
477         spin_lock(&fc->lock);
478 }
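/*
 * Editor's sketch of the interrupt handshake driven by the code above:
 *
 *	1. a signal arrives while the request is FUSE_REQ_SENT;
 *	2. FR_INTERRUPTED is set and a FUSE_INTERRUPT message carrying
 *	   req->intr_unique is queued via queue_interrupt();
 *	3. userspace either finishes the original request (typically with
 *	   -EINTR) or replies -ENOSYS to the interrupt, which sets
 *	   fc->no_interrupt so step 2 is skipped from then on.
 */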
479
480 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
481 {
482         BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
483         spin_lock(&fc->lock);
484         if (!fc->connected)
485                 req->out.h.error = -ENOTCONN;
486         else {
487                 req->in.h.unique = fuse_get_unique(fc);
488                 queue_request(fc, req);
489                 /* acquire extra reference, since request is still needed
490                    after request_end() */
491                 __fuse_get_request(req);
492
493                 request_wait_answer(fc, req);
494         }
495         spin_unlock(&fc->lock);
496 }
497
498 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
499 {
500         __set_bit(FR_ISREPLY, &req->flags);
501         if (!test_bit(FR_WAITING, &req->flags)) {
502                 __set_bit(FR_WAITING, &req->flags);
503                 atomic_inc(&fc->num_waiting);
504         }
505         __fuse_request_send(fc, req);
506 }
507 EXPORT_SYMBOL_GPL(fuse_request_send);
508
509 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
510 {
511         if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
512                 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
513
514         if (fc->minor < 9) {
515                 switch (args->in.h.opcode) {
516                 case FUSE_LOOKUP:
517                 case FUSE_CREATE:
518                 case FUSE_MKNOD:
519                 case FUSE_MKDIR:
520                 case FUSE_SYMLINK:
521                 case FUSE_LINK:
522                         args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
523                         break;
524                 case FUSE_GETATTR:
525                 case FUSE_SETATTR:
526                         args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
527                         break;
528                 }
529         }
530         if (fc->minor < 12) {
531                 switch (args->in.h.opcode) {
532                 case FUSE_CREATE:
533                         args->in.args[0].size = sizeof(struct fuse_open_in);
534                         break;
535                 case FUSE_MKNOD:
536                         args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
537                         break;
538                 }
539         }
540 }
541
542 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
543 {
544         struct fuse_req *req;
545         ssize_t ret;
546
547         req = fuse_get_req(fc, 0);
548         if (IS_ERR(req))
549                 return PTR_ERR(req);
550
551         /* Needs to be done after fuse_get_req() so that fc->minor is valid */
552         fuse_adjust_compat(fc, args);
553
554         req->in.h.opcode = args->in.h.opcode;
555         req->in.h.nodeid = args->in.h.nodeid;
556         req->in.numargs = args->in.numargs;
557         memcpy(req->in.args, args->in.args,
558                args->in.numargs * sizeof(struct fuse_in_arg));
559         req->out.argvar = args->out.argvar;
560         req->out.numargs = args->out.numargs;
561         memcpy(req->out.args, args->out.args,
562                args->out.numargs * sizeof(struct fuse_arg));
563         fuse_request_send(fc, req);
564         ret = req->out.h.error;
565         if (!ret && args->out.argvar) {
566                 BUG_ON(args->out.numargs != 1);
567                 ret = req->out.args[0].size;
568         }
569         fuse_put_request(fc, req);
570
571         return ret;
572 }
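/*
 * Editor's sketch of a typical caller, modeled on the simple opcodes in
 * dir.c (the exact fields are illustrative):
 *
 *	FUSE_ARGS(args);
 *	struct fuse_access_in inarg;
 *
 *	memset(&inarg, 0, sizeof(inarg));
 *	inarg.mask = mask;
 *	args.in.h.opcode = FUSE_ACCESS;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	err = fuse_simple_request(fc, &args);
 */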
573
574 /*
575  * Called under fc->lock
576  *
577  * fc->connected must have been checked previously
578  */
579 void fuse_request_send_background_locked(struct fuse_conn *fc,
580                                          struct fuse_req *req)
581 {
582         BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
583         if (!test_bit(FR_WAITING, &req->flags)) {
584                 __set_bit(FR_WAITING, &req->flags);
585                 atomic_inc(&fc->num_waiting);
586         }
587         __set_bit(FR_ISREPLY, &req->flags);
588         fc->num_background++;
589         if (fc->num_background == fc->max_background)
590                 fc->blocked = 1;
591         if (fc->num_background == fc->congestion_threshold &&
592             fc->bdi_initialized) {
593                 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
594                 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
595         }
596         list_add_tail(&req->list, &fc->bg_queue);
597         flush_bg_queue(fc);
598 }
599
600 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
601 {
602         BUG_ON(!req->end);
603         spin_lock(&fc->lock);
604         if (fc->connected) {
605                 fuse_request_send_background_locked(fc, req);
606                 spin_unlock(&fc->lock);
607         } else {
608                 spin_unlock(&fc->lock);
609                 req->out.h.error = -ENOTCONN;
610                 req->end(fc, req);
611                 fuse_put_request(fc, req);
612         }
613 }
614 EXPORT_SYMBOL_GPL(fuse_request_send_background);
615
616 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
617                                           struct fuse_req *req, u64 unique)
618 {
619         int err = -ENODEV;
620
621         __clear_bit(FR_ISREPLY, &req->flags);
622         req->in.h.unique = unique;
623         spin_lock(&fc->lock);
624         if (fc->connected) {
625                 queue_request(fc, req);
626                 err = 0;
627         }
628         spin_unlock(&fc->lock);
629
630         return err;
631 }
632
633 void fuse_force_forget(struct file *file, u64 nodeid)
634 {
635         struct inode *inode = file_inode(file);
636         struct fuse_conn *fc = get_fuse_conn(inode);
637         struct fuse_req *req;
638         struct fuse_forget_in inarg;
639
640         memset(&inarg, 0, sizeof(inarg));
641         inarg.nlookup = 1;
642         req = fuse_get_req_nofail_nopages(fc, file);
643         req->in.h.opcode = FUSE_FORGET;
644         req->in.h.nodeid = nodeid;
645         req->in.numargs = 1;
646         req->in.args[0].size = sizeof(inarg);
647         req->in.args[0].value = &inarg;
648         __clear_bit(FR_ISREPLY, &req->flags);
649         __fuse_request_send(fc, req);
650         /* ignore errors */
651         fuse_put_request(fc, req);
652 }
653
654 /*
655  * Lock the request.  Up to the next unlock_request() there mustn't be
656  * anything that could cause a page-fault.  If the request was already
657  * aborted, bail out.
658  */
659 static int lock_request(struct fuse_req *req)
660 {
661         int err = 0;
662         if (req) {
663                 spin_lock(&req->waitq.lock);
664                 if (test_bit(FR_ABORTED, &req->flags))
665                         err = -ENOENT;
666                 else
667                         set_bit(FR_LOCKED, &req->flags);
668                 spin_unlock(&req->waitq.lock);
669         }
670         return err;
671 }
672
673 /*
674  * Unlock request.  If it was aborted while locked, caller is responsible
675  * for unlocking and ending the request.
676  */
677 static int unlock_request(struct fuse_req *req)
678 {
679         int err = 0;
680         if (req) {
681                 spin_lock(&req->waitq.lock);
682                 if (test_bit(FR_ABORTED, &req->flags))
683                         err = -ENOENT;
684                 else
685                         clear_bit(FR_LOCKED, &req->flags);
686                 spin_unlock(&req->waitq.lock);
687         }
688         return err;
689 }
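/*
 * Editor's note: both helpers serialize against the abort path on the
 * per-request req->waitq.lock rather than on fc->lock.  The abort side
 * takes the same spinlock, roughly:
 *
 *	spin_lock(&req->waitq.lock);
 *	set_bit(FR_ABORTED, &req->flags);
 *	if (!test_bit(FR_LOCKED, &req->flags))
 *		... end the request immediately ...
 *	spin_unlock(&req->waitq.lock);
 *
 * so a request whose pages are mid-copy (FR_LOCKED) is left for
 * unlock_request()'s caller to finish.
 */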
690
691 struct fuse_copy_state {
692         int write;
693         struct fuse_req *req;
694         struct iov_iter *iter;
695         struct pipe_buffer *pipebufs;
696         struct pipe_buffer *currbuf;
697         struct pipe_inode_info *pipe;
698         unsigned long nr_segs;
699         struct page *pg;
700         unsigned len;
701         unsigned offset;
702         unsigned move_pages:1;
703 };
704
705 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
706                            struct iov_iter *iter)
707 {
708         memset(cs, 0, sizeof(*cs));
709         cs->write = write;
710         cs->iter = iter;
711 }
712
713 /* Unmap and put previous page of userspace buffer */
714 static void fuse_copy_finish(struct fuse_copy_state *cs)
715 {
716         if (cs->currbuf) {
717                 struct pipe_buffer *buf = cs->currbuf;
718
719                 if (cs->write)
720                         buf->len = PAGE_SIZE - cs->len;
721                 cs->currbuf = NULL;
722         } else if (cs->pg) {
723                 if (cs->write) {
724                         flush_dcache_page(cs->pg);
725                         set_page_dirty_lock(cs->pg);
726                 }
727                 put_page(cs->pg);
728         }
729         cs->pg = NULL;
730 }
731
732 /*
733  * Get another pageful of userspace buffer, map it to kernel
734  * address space, and lock the request
735  */
736 static int fuse_copy_fill(struct fuse_copy_state *cs)
737 {
738         struct page *page;
739         int err;
740
741         err = unlock_request(cs->req);
742         if (err)
743                 return err;
744
745         fuse_copy_finish(cs);
746         if (cs->pipebufs) {
747                 struct pipe_buffer *buf = cs->pipebufs;
748
749                 if (!cs->write) {
750                         err = buf->ops->confirm(cs->pipe, buf);
751                         if (err)
752                                 return err;
753
754                         BUG_ON(!cs->nr_segs);
755                         cs->currbuf = buf;
756                         cs->pg = buf->page;
757                         cs->offset = buf->offset;
758                         cs->len = buf->len;
759                         cs->pipebufs++;
760                         cs->nr_segs--;
761                 } else {
762                         if (cs->nr_segs == cs->pipe->buffers)
763                                 return -EIO;
764
765                         page = alloc_page(GFP_HIGHUSER);
766                         if (!page)
767                                 return -ENOMEM;
768
769                         buf->page = page;
770                         buf->offset = 0;
771                         buf->len = 0;
772
773                         cs->currbuf = buf;
774                         cs->pg = page;
775                         cs->offset = 0;
776                         cs->len = PAGE_SIZE;
777                         cs->pipebufs++;
778                         cs->nr_segs++;
779                 }
780         } else {
781                 size_t off;
782                 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
783                 if (err < 0)
784                         return err;
785                 BUG_ON(!err);
786                 cs->len = err;
787                 cs->offset = off;
788                 cs->pg = page;
790                 iov_iter_advance(cs->iter, err);
791         }
792
793         return lock_request(cs->req);
794 }
795
796 /* Do as much copy to/from userspace buffer as we can */
797 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
798 {
799         unsigned ncpy = min(*size, cs->len);
800         if (val) {
801                 void *pgaddr = kmap_atomic(cs->pg);
802                 void *buf = pgaddr + cs->offset;
803
804                 if (cs->write)
805                         memcpy(buf, *val, ncpy);
806                 else
807                         memcpy(*val, buf, ncpy);
808
809                 kunmap_atomic(pgaddr);
810                 *val += ncpy;
811         }
812         *size -= ncpy;
813         cs->len -= ncpy;
814         cs->offset += ncpy;
815         return ncpy;
816 }
817
818 static int fuse_check_page(struct page *page)
819 {
820         if (page_mapcount(page) ||
821             page->mapping != NULL ||
822             page_count(page) != 1 ||
823             (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
824              ~(1 << PG_locked |
825                1 << PG_referenced |
826                1 << PG_uptodate |
827                1 << PG_lru |
828                1 << PG_active |
829                1 << PG_reclaim))) {
830                 printk(KERN_WARNING "fuse: trying to steal weird page\n");
831                 printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
832                 return 1;
833         }
834         return 0;
835 }
836
837 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
838 {
839         int err;
840         struct page *oldpage = *pagep;
841         struct page *newpage;
842         struct pipe_buffer *buf = cs->pipebufs;
843
844         err = unlock_request(cs->req);
845         if (err)
846                 return err;
847
848         fuse_copy_finish(cs);
849
850         err = buf->ops->confirm(cs->pipe, buf);
851         if (err)
852                 return err;
853
854         BUG_ON(!cs->nr_segs);
855         cs->currbuf = buf;
856         cs->len = buf->len;
857         cs->pipebufs++;
858         cs->nr_segs--;
859
860         if (cs->len != PAGE_SIZE)
861                 goto out_fallback;
862
863         if (buf->ops->steal(cs->pipe, buf) != 0)
864                 goto out_fallback;
865
866         newpage = buf->page;
867
868         if (!PageUptodate(newpage))
869                 SetPageUptodate(newpage);
870
871         ClearPageMappedToDisk(newpage);
872
873         if (fuse_check_page(newpage) != 0)
874                 goto out_fallback_unlock;
875
876         /*
877          * This is a new and locked page, it shouldn't be mapped or
878          * have any special flags on it
879          */
880         if (WARN_ON(page_mapped(oldpage)))
881                 goto out_fallback_unlock;
882         if (WARN_ON(page_has_private(oldpage)))
883                 goto out_fallback_unlock;
884         if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
885                 goto out_fallback_unlock;
886         if (WARN_ON(PageMlocked(oldpage)))
887                 goto out_fallback_unlock;
888
889         err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
890         if (err) {
891                 unlock_page(newpage);
892                 return err;
893         }
894
895         page_cache_get(newpage);
896
897         if (!(buf->flags & PIPE_BUF_FLAG_LRU))
898                 lru_cache_add_file(newpage);
899
900         err = 0;
901         spin_lock(&cs->req->waitq.lock);
902         if (test_bit(FR_ABORTED, &cs->req->flags))
903                 err = -ENOENT;
904         else
905                 *pagep = newpage;
906         spin_unlock(&cs->req->waitq.lock);
907
908         if (err) {
909                 unlock_page(newpage);
910                 page_cache_release(newpage);
911                 return err;
912         }
913
914         unlock_page(oldpage);
915         page_cache_release(oldpage);
916         cs->len = 0;
917
918         return 0;
919
920 out_fallback_unlock:
921         unlock_page(newpage);
922 out_fallback:
923         cs->pg = buf->page;
924         cs->offset = buf->offset;
925
926         err = lock_request(cs->req);
927         if (err)
928                 return err;
929
930         return 1;
931 }
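/*
 * Editor's sketch of what reaches this path from userspace (hypothetical
 * descriptors): the server splices reply data into /dev/fuse with
 * SPLICE_F_MOVE, and fuse_dev_splice_write() sets cs->move_pages only
 * when that flag was given.  Stealing still requires a full, well-aligned
 * page; anything else takes the copy fallback above.
 *
 *	splice(backing_fd, NULL, pipefd[1], NULL, len, SPLICE_F_MOVE);
 *	splice(pipefd[0], NULL, fuse_fd, NULL, hdrlen + len, SPLICE_F_MOVE);
 */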
932
933 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
934                          unsigned offset, unsigned count)
935 {
936         struct pipe_buffer *buf;
937         int err;
938
939         if (cs->nr_segs == cs->pipe->buffers)
940                 return -EIO;
941
942         err = unlock_request(cs->req);
943         if (err)
944                 return err;
945
946         fuse_copy_finish(cs);
947
948         buf = cs->pipebufs;
949         page_cache_get(page);
950         buf->page = page;
951         buf->offset = offset;
952         buf->len = count;
953
954         cs->pipebufs++;
955         cs->nr_segs++;
956         cs->len = 0;
957
958         return 0;
959 }
960
961 /*
962  * Copy a page in the request to/from the userspace buffer.  Must be
963  * done atomically
964  */
965 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
966                           unsigned offset, unsigned count, int zeroing)
967 {
968         int err;
969         struct page *page = *pagep;
970
971         if (page && zeroing && count < PAGE_SIZE)
972                 clear_highpage(page);
973
974         while (count) {
975                 if (cs->write && cs->pipebufs && page) {
976                         return fuse_ref_page(cs, page, offset, count);
977                 } else if (!cs->len) {
978                         if (cs->move_pages && page &&
979                             offset == 0 && count == PAGE_SIZE) {
980                                 err = fuse_try_move_page(cs, pagep);
981                                 if (err <= 0)
982                                         return err;
983                         } else {
984                                 err = fuse_copy_fill(cs);
985                                 if (err)
986                                         return err;
987                         }
988                 }
989                 if (page) {
990                         void *mapaddr = kmap_atomic(page);
991                         void *buf = mapaddr + offset;
992                         offset += fuse_copy_do(cs, &buf, &count);
993                         kunmap_atomic(mapaddr);
994                 } else
995                         offset += fuse_copy_do(cs, NULL, &count);
996         }
997         if (page && !cs->write)
998                 flush_dcache_page(page);
999         return 0;
1000 }
1001
1002 /* Copy pages in the request to/from userspace buffer */
1003 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1004                            int zeroing)
1005 {
1006         unsigned i;
1007         struct fuse_req *req = cs->req;
1008
1009         for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
1010                 int err;
1011                 unsigned offset = req->page_descs[i].offset;
1012                 unsigned count = min(nbytes, req->page_descs[i].length);
1013
1014                 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1015                                      zeroing);
1016                 if (err)
1017                         return err;
1018
1019                 nbytes -= count;
1020         }
1021         return 0;
1022 }
1023
1024 /* Copy a single argument in the request to/from userspace buffer */
1025 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1026 {
1027         while (size) {
1028                 if (!cs->len) {
1029                         int err = fuse_copy_fill(cs);
1030                         if (err)
1031                                 return err;
1032                 }
1033                 fuse_copy_do(cs, &val, &size);
1034         }
1035         return 0;
1036 }
1037
1038 /* Copy request arguments to/from userspace buffer */
1039 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1040                           unsigned argpages, struct fuse_arg *args,
1041                           int zeroing)
1042 {
1043         int err = 0;
1044         unsigned i;
1045
1046         for (i = 0; !err && i < numargs; i++)  {
1047                 struct fuse_arg *arg = &args[i];
1048                 if (i == numargs - 1 && argpages)
1049                         err = fuse_copy_pages(cs, arg->size, zeroing);
1050                 else
1051                         err = fuse_copy_one(cs, arg->value, arg->size);
1052         }
1053         return err;
1054 }
1055
1056 static int forget_pending(struct fuse_conn *fc)
1057 {
1058         return fc->forget_list_head.next != NULL;
1059 }
1060
1061 static int request_pending(struct fuse_conn *fc)
1062 {
1063         return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
1064                 forget_pending(fc);
1065 }
1066
1067 /* Wait until a request is available on the pending list */
1068 static void request_wait(struct fuse_conn *fc)
1069 __releases(fc->lock)
1070 __acquires(fc->lock)
1071 {
1072         DECLARE_WAITQUEUE(wait, current);
1073
1074         add_wait_queue_exclusive(&fc->waitq, &wait);
1075         while (fc->connected && !request_pending(fc)) {
1076                 set_current_state(TASK_INTERRUPTIBLE);
1077                 if (signal_pending(current))
1078                         break;
1079
1080                 spin_unlock(&fc->lock);
1081                 schedule();
1082                 spin_lock(&fc->lock);
1083         }
1084         set_current_state(TASK_RUNNING);
1085         remove_wait_queue(&fc->waitq, &wait);
1086 }
1087
1088 /*
1089  * Transfer an interrupt request to userspace
1090  *
1091  * Unlike other requests this is assembled on demand, without a need
1092  * to allocate a separate fuse_req structure.
1093  *
1094  * Called with fc->lock held, releases it
1095  */
1096 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
1097                                size_t nbytes, struct fuse_req *req)
1098 __releases(fc->lock)
1099 {
1100         struct fuse_in_header ih;
1101         struct fuse_interrupt_in arg;
1102         unsigned reqsize = sizeof(ih) + sizeof(arg);
1103         int err;
1104
1105         list_del_init(&req->intr_entry);
1106         req->intr_unique = fuse_get_unique(fc);
1107         memset(&ih, 0, sizeof(ih));
1108         memset(&arg, 0, sizeof(arg));
1109         ih.len = reqsize;
1110         ih.opcode = FUSE_INTERRUPT;
1111         ih.unique = req->intr_unique;
1112         arg.unique = req->in.h.unique;
1113
1114         spin_unlock(&fc->lock);
1115         if (nbytes < reqsize)
1116                 return -EINVAL;
1117
1118         err = fuse_copy_one(cs, &ih, sizeof(ih));
1119         if (!err)
1120                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1121         fuse_copy_finish(cs);
1122
1123         return err ? err : reqsize;
1124 }
1125
1126 static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
1127                                                unsigned max,
1128                                                unsigned *countp)
1129 {
1130         struct fuse_forget_link *head = fc->forget_list_head.next;
1131         struct fuse_forget_link **newhead = &head;
1132         unsigned count;
1133
1134         for (count = 0; *newhead != NULL && count < max; count++)
1135                 newhead = &(*newhead)->next;
1136
1137         fc->forget_list_head.next = *newhead;
1138         *newhead = NULL;
1139         if (fc->forget_list_head.next == NULL)
1140                 fc->forget_list_tail = &fc->forget_list_head;
1141
1142         if (countp != NULL)
1143                 *countp = count;
1144
1145         return head;
1146 }
1147
1148 static int fuse_read_single_forget(struct fuse_conn *fc,
1149                                    struct fuse_copy_state *cs,
1150                                    size_t nbytes)
1151 __releases(fc->lock)
1152 {
1153         int err;
1154         struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
1155         struct fuse_forget_in arg = {
1156                 .nlookup = forget->forget_one.nlookup,
1157         };
1158         struct fuse_in_header ih = {
1159                 .opcode = FUSE_FORGET,
1160                 .nodeid = forget->forget_one.nodeid,
1161                 .unique = fuse_get_unique(fc),
1162                 .len = sizeof(ih) + sizeof(arg),
1163         };
1164
1165         spin_unlock(&fc->lock);
1166         kfree(forget);
1167         if (nbytes < ih.len)
1168                 return -EINVAL;
1169
1170         err = fuse_copy_one(cs, &ih, sizeof(ih));
1171         if (!err)
1172                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1173         fuse_copy_finish(cs);
1174
1175         if (err)
1176                 return err;
1177
1178         return ih.len;
1179 }
1180
1181 static int fuse_read_batch_forget(struct fuse_conn *fc,
1182                                    struct fuse_copy_state *cs, size_t nbytes)
1183 __releases(fc->lock)
1184 {
1185         int err;
1186         unsigned max_forgets;
1187         unsigned count;
1188         struct fuse_forget_link *head;
1189         struct fuse_batch_forget_in arg = { .count = 0 };
1190         struct fuse_in_header ih = {
1191                 .opcode = FUSE_BATCH_FORGET,
1192                 .unique = fuse_get_unique(fc),
1193                 .len = sizeof(ih) + sizeof(arg),
1194         };
1195
1196         if (nbytes < ih.len) {
1197                 spin_unlock(&fc->lock);
1198                 return -EINVAL;
1199         }
1200
1201         max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1202         head = dequeue_forget(fc, max_forgets, &count);
1203         spin_unlock(&fc->lock);
1204
1205         arg.count = count;
1206         ih.len += count * sizeof(struct fuse_forget_one);
1207         err = fuse_copy_one(cs, &ih, sizeof(ih));
1208         if (!err)
1209                 err = fuse_copy_one(cs, &arg, sizeof(arg));
1210
1211         while (head) {
1212                 struct fuse_forget_link *forget = head;
1213
1214                 if (!err) {
1215                         err = fuse_copy_one(cs, &forget->forget_one,
1216                                             sizeof(forget->forget_one));
1217                 }
1218                 head = forget->next;
1219                 kfree(forget);
1220         }
1221
1222         fuse_copy_finish(cs);
1223
1224         if (err)
1225                 return err;
1226
1227         return ih.len;
1228 }
1229
1230 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1231                             size_t nbytes)
1232 __releases(fc->lock)
1233 {
1234         if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1235                 return fuse_read_single_forget(fc, cs, nbytes);
1236         else
1237                 return fuse_read_batch_forget(fc, cs, nbytes);
1238 }
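/*
 * Editor's note on the wire formats produced above (batching requires
 * ABI 7.16+ and more than one queued forget):
 *
 *	single:	fuse_in_header + fuse_forget_in
 *	batch:	fuse_in_header + fuse_batch_forget_in
 *			+ arg.count * fuse_forget_one
 */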
1239
1240 /*
1241  * Read a single request into the userspace filesystem's buffer.  This
1242  * function waits until a request is available, then removes it from
1243  * the pending list and copies request data to userspace buffer.  If
1244  * no reply is needed (FORGET) or request has been aborted or there
1245  * was an error during the copying then it's finished by calling
1246  * request_end().  Otherwise add it to the processing list, and set
1247  * the 'sent' flag.
1248  */
1249 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1250                                 struct fuse_copy_state *cs, size_t nbytes)
1251 {
1252         int err;
1253         struct fuse_req *req;
1254         struct fuse_in *in;
1255         unsigned reqsize;
1256
1257  restart:
1258         spin_lock(&fc->lock);
1259         err = -EAGAIN;
1260         if ((file->f_flags & O_NONBLOCK) && fc->connected &&
1261             !request_pending(fc))
1262                 goto err_unlock;
1263
1264         request_wait(fc);
1265         err = -ENODEV;
1266         if (!fc->connected)
1267                 goto err_unlock;
1268         err = -ERESTARTSYS;
1269         if (!request_pending(fc))
1270                 goto err_unlock;
1271
1272         if (!list_empty(&fc->interrupts)) {
1273                 req = list_entry(fc->interrupts.next, struct fuse_req,
1274                                  intr_entry);
1275                 return fuse_read_interrupt(fc, cs, nbytes, req);
1276         }
1277
1278         if (forget_pending(fc)) {
1279                 if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
1280                         return fuse_read_forget(fc, cs, nbytes);
1281
1282                 if (fc->forget_batch <= -8)
1283                         fc->forget_batch = 16;
1284         }
1285
1286         req = list_entry(fc->pending.next, struct fuse_req, list);
1287         req->state = FUSE_REQ_READING;
1288         list_move(&req->list, &fc->io);
1289
1290         in = &req->in;
1291         reqsize = in->h.len;
1292         /* If request is too large, reply with an error and restart the read */
1293         if (nbytes < reqsize) {
1294                 req->out.h.error = -EIO;
1295                 /* SETXATTR is special, since it may contain too large data */
1296                 if (in->h.opcode == FUSE_SETXATTR)
1297                         req->out.h.error = -E2BIG;
1298                 request_end(fc, req);
1299                 goto restart;
1300         }
1301         spin_unlock(&fc->lock);
1302         cs->req = req;
1303         err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1304         if (!err)
1305                 err = fuse_copy_args(cs, in->numargs, in->argpages,
1306                                      (struct fuse_arg *) in->args, 0);
1307         fuse_copy_finish(cs);
1308         spin_lock(&fc->lock);
1309         clear_bit(FR_LOCKED, &req->flags);
1310         if (!fc->connected) {
1311                 request_end(fc, req);
1312                 return -ENODEV;
1313         }
1314         if (err) {
1315                 req->out.h.error = -EIO;
1316                 request_end(fc, req);
1317                 return err;
1318         }
1319         if (!test_bit(FR_ISREPLY, &req->flags)) {
1320                 request_end(fc, req);
1321         } else {
1322                 req->state = FUSE_REQ_SENT;
1323                 list_move_tail(&req->list, &fc->processing);
1324                 if (test_bit(FR_INTERRUPTED, &req->flags))
1325                         queue_interrupt(fc, req);
1326                 spin_unlock(&fc->lock);
1327         }
1328         return reqsize;
1329
1330  err_unlock:
1331         spin_unlock(&fc->lock);
1332         return err;
1333 }
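/*
 * Editor's sketch of the consumer side (a hypothetical daemon's read
 * loop; dispatch() is made up): each read() returns exactly one message
 * that starts with struct fuse_in_header, and the buffer must be large
 * enough for a maximal request or the read fails with -EIO as above.
 *
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, bufsize);
 *		struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *
 *		if (n < (ssize_t)sizeof(*in))
 *			break;
 *		dispatch(in->opcode, in->unique, in + 1, n - sizeof(*in));
 *	}
 */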
1334
1335 static int fuse_dev_open(struct inode *inode, struct file *file)
1336 {
1337         /*
1338          * A fuse device file's private_data holds the fuse_conn(ection)
1339          * once it is mounted, and doubles as a record of whether the
1340          * file has been mounted already.
1341          */
1342         file->private_data = NULL;
1343         return 0;
1344 }
1345
1346 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1347 {
1348         struct fuse_copy_state cs;
1349         struct file *file = iocb->ki_filp;
1350         struct fuse_conn *fc = fuse_get_conn(file);
1351         if (!fc)
1352                 return -EPERM;
1353
1354         if (!iter_is_iovec(to))
1355                 return -EINVAL;
1356
1357         fuse_copy_init(&cs, 1, to);
1358
1359         return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
1360 }
1361
1362 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1363                                     struct pipe_inode_info *pipe,
1364                                     size_t len, unsigned int flags)
1365 {
1366         int ret;
1367         int page_nr = 0;
1368         int do_wakeup = 0;
1369         struct pipe_buffer *bufs;
1370         struct fuse_copy_state cs;
1371         struct fuse_conn *fc = fuse_get_conn(in);
1372         if (!fc)
1373                 return -EPERM;
1374
1375         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1376         if (!bufs)
1377                 return -ENOMEM;
1378
1379         fuse_copy_init(&cs, 1, NULL);
1380         cs.pipebufs = bufs;
1381         cs.pipe = pipe;
1382         ret = fuse_dev_do_read(fc, in, &cs, len);
1383         if (ret < 0)
1384                 goto out;
1385
1386         ret = 0;
1387         pipe_lock(pipe);
1388
1389         if (!pipe->readers) {
1390                 send_sig(SIGPIPE, current, 0);
1391                 if (!ret)
1392                         ret = -EPIPE;
1393                 goto out_unlock;
1394         }
1395
1396         if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1397                 ret = -EIO;
1398                 goto out_unlock;
1399         }
1400
1401         while (page_nr < cs.nr_segs) {
1402                 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1403                 struct pipe_buffer *buf = pipe->bufs + newbuf;
1404
1405                 buf->page = bufs[page_nr].page;
1406                 buf->offset = bufs[page_nr].offset;
1407                 buf->len = bufs[page_nr].len;
1408                 /*
1409                  * Need to be careful about this.  Having buf->ops in module
1410                  * code can Oops if the buffer persists after module unload.
1411                  */
1412                 buf->ops = &nosteal_pipe_buf_ops;
1413
1414                 pipe->nrbufs++;
1415                 page_nr++;
1416                 ret += buf->len;
1417
1418                 if (pipe->files)
1419                         do_wakeup = 1;
1420         }
1421
1422 out_unlock:
1423         pipe_unlock(pipe);
1424
1425         if (do_wakeup) {
1426                 smp_mb();
1427                 if (waitqueue_active(&pipe->wait))
1428                         wake_up_interruptible(&pipe->wait);
1429                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1430         }
1431
1432 out:
1433         for (; page_nr < cs.nr_segs; page_nr++)
1434                 page_cache_release(bufs[page_nr].page);
1435
1436         kfree(bufs);
1437         return ret;
1438 }
1439
1440 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1441                             struct fuse_copy_state *cs)
1442 {
1443         struct fuse_notify_poll_wakeup_out outarg;
1444         int err = -EINVAL;
1445
1446         if (size != sizeof(outarg))
1447                 goto err;
1448
1449         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1450         if (err)
1451                 goto err;
1452
1453         fuse_copy_finish(cs);
1454         return fuse_notify_poll_wakeup(fc, &outarg);
1455
1456 err:
1457         fuse_copy_finish(cs);
1458         return err;
1459 }
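/*
 * Editor's note: notifications like the one above arrive over an ordinary
 * write() to /dev/fuse whose struct fuse_out_header has unique == 0 and
 * the notification code in its error field; fuse_dev_do_write() routes
 * them to fuse_notify().  A hypothetical server-side send:
 *
 *	struct fuse_out_header oh = {
 *		.unique = 0,
 *		.error = FUSE_NOTIFY_POLL,
 *		.len = sizeof(oh) + sizeof(struct fuse_notify_poll_wakeup_out),
 *	};
 *	writev(fuse_fd, (struct iovec[]){ { &oh, sizeof(oh) },
 *					  { &outarg, sizeof(outarg) } }, 2);
 */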
1460
1461 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1462                                    struct fuse_copy_state *cs)
1463 {
1464         struct fuse_notify_inval_inode_out outarg;
1465         int err = -EINVAL;
1466
1467         if (size != sizeof(outarg))
1468                 goto err;
1469
1470         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1471         if (err)
1472                 goto err;
1473         fuse_copy_finish(cs);
1474
1475         down_read(&fc->killsb);
1476         err = -ENOENT;
1477         if (fc->sb) {
1478                 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1479                                                outarg.off, outarg.len);
1480         }
1481         up_read(&fc->killsb);
1482         return err;
1483
1484 err:
1485         fuse_copy_finish(cs);
1486         return err;
1487 }
1488
1489 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1490                                    struct fuse_copy_state *cs)
1491 {
1492         struct fuse_notify_inval_entry_out outarg;
1493         int err = -ENOMEM;
1494         char *buf;
1495         struct qstr name;
1496
1497         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1498         if (!buf)
1499                 goto err;
1500
1501         err = -EINVAL;
1502         if (size < sizeof(outarg))
1503                 goto err;
1504
1505         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1506         if (err)
1507                 goto err;
1508
1509         err = -ENAMETOOLONG;
1510         if (outarg.namelen > FUSE_NAME_MAX)
1511                 goto err;
1512
1513         err = -EINVAL;
1514         if (size != sizeof(outarg) + outarg.namelen + 1)
1515                 goto err;
1516
1517         name.name = buf;
1518         name.len = outarg.namelen;
1519         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1520         if (err)
1521                 goto err;
1522         fuse_copy_finish(cs);
1523         buf[outarg.namelen] = 0;
1524         name.hash = full_name_hash(name.name, name.len);
1525
1526         down_read(&fc->killsb);
1527         err = -ENOENT;
1528         if (fc->sb)
1529                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1530         up_read(&fc->killsb);
1531         kfree(buf);
1532         return err;
1533
1534 err:
1535         kfree(buf);
1536         fuse_copy_finish(cs);
1537         return err;
1538 }
1539
1540 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1541                               struct fuse_copy_state *cs)
1542 {
1543         struct fuse_notify_delete_out outarg;
1544         int err = -ENOMEM;
1545         char *buf;
1546         struct qstr name;
1547
1548         buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1549         if (!buf)
1550                 goto err;
1551
1552         err = -EINVAL;
1553         if (size < sizeof(outarg))
1554                 goto err;
1555
1556         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1557         if (err)
1558                 goto err;
1559
1560         err = -ENAMETOOLONG;
1561         if (outarg.namelen > FUSE_NAME_MAX)
1562                 goto err;
1563
1564         err = -EINVAL;
1565         if (size != sizeof(outarg) + outarg.namelen + 1)
1566                 goto err;
1567
1568         name.name = buf;
1569         name.len = outarg.namelen;
1570         err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1571         if (err)
1572                 goto err;
1573         fuse_copy_finish(cs);
1574         buf[outarg.namelen] = 0;
1575         name.hash = full_name_hash(name.name, name.len);
1576
1577         down_read(&fc->killsb);
1578         err = -ENOENT;
1579         if (fc->sb)
1580                 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1581                                                outarg.child, &name);
1582         up_read(&fc->killsb);
1583         kfree(buf);
1584         return err;
1585
1586 err:
1587         kfree(buf);
1588         fuse_copy_finish(cs);
1589         return err;
1590 }
1591
1592 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1593                              struct fuse_copy_state *cs)
1594 {
1595         struct fuse_notify_store_out outarg;
1596         struct inode *inode;
1597         struct address_space *mapping;
1598         u64 nodeid;
1599         int err;
1600         pgoff_t index;
1601         unsigned int offset;
1602         unsigned int num;
1603         loff_t file_size;
1604         loff_t end;
1605
1606         err = -EINVAL;
1607         if (size < sizeof(outarg))
1608                 goto out_finish;
1609
1610         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1611         if (err)
1612                 goto out_finish;
1613
1614         err = -EINVAL;
1615         if (size - sizeof(outarg) != outarg.size)
1616                 goto out_finish;
1617
1618         nodeid = outarg.nodeid;
1619
1620         down_read(&fc->killsb);
1621
1622         err = -ENOENT;
1623         if (!fc->sb)
1624                 goto out_up_killsb;
1625
1626         inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1627         if (!inode)
1628                 goto out_up_killsb;
1629
1630         mapping = inode->i_mapping;
1631         index = outarg.offset >> PAGE_CACHE_SHIFT;
1632         offset = outarg.offset & ~PAGE_CACHE_MASK;
1633         file_size = i_size_read(inode);
1634         end = outarg.offset + outarg.size;
1635         if (end > file_size) {
1636                 file_size = end;
1637                 fuse_write_update_size(inode, file_size);
1638         }
1639
1640         num = outarg.size;
1641         while (num) {
1642                 struct page *page;
1643                 unsigned int this_num;
1644
1645                 err = -ENOMEM;
1646                 page = find_or_create_page(mapping, index,
1647                                            mapping_gfp_mask(mapping));
1648                 if (!page)
1649                         goto out_iput;
1650
1651                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1652                 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1653                 if (!err && offset == 0 &&
1654                     (this_num == PAGE_CACHE_SIZE || file_size == end))
1655                         SetPageUptodate(page);
1656                 unlock_page(page);
1657                 page_cache_release(page);
1658
1659                 if (err)
1660                         goto out_iput;
1661
1662                 num -= this_num;
1663                 offset = 0;
1664                 index++;
1665         }
1666
1667         err = 0;
1668
1669 out_iput:
1670         iput(inode);
1671 out_up_killsb:
1672         up_read(&fc->killsb);
1673 out_finish:
1674         fuse_copy_finish(cs);
1675         return err;
1676 }
1677
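/*
 * Request-end callback for the FUSE_NOTIFY_REPLY request built in
 * fuse_retrieve(): drop the page references taken with find_get_page().
 */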
1678 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1679 {
1680         release_pages(req->pages, req->num_pages, false);
1681 }
1682
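/*
 * Build and send a FUSE_NOTIFY_REPLY request carrying the cached pages
 * covering the byte range named by a FUSE_NOTIFY_RETRIEVE notification.
 * Pages are only looked up, never created, and the range is clamped to
 * i_size, so total_len may end up smaller than outarg->size.
 */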
1683 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1684                          struct fuse_notify_retrieve_out *outarg)
1685 {
1686         int err;
1687         struct address_space *mapping = inode->i_mapping;
1688         struct fuse_req *req;
1689         pgoff_t index;
1690         loff_t file_size;
1691         unsigned int num;
1692         unsigned int offset;
1693         size_t total_len = 0;
1694         int num_pages;
1695
1696         offset = outarg->offset & ~PAGE_CACHE_MASK;
1697         file_size = i_size_read(inode);
1698
1699         num = outarg->size;
1700         if (outarg->offset > file_size)
1701                 num = 0;
1702         else if (outarg->offset + num > file_size)
1703                 num = file_size - outarg->offset;
1704
1705         num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1706         num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1707
1708         req = fuse_get_req(fc, num_pages);
1709         if (IS_ERR(req))
1710                 return PTR_ERR(req);
1711
1712         req->in.h.opcode = FUSE_NOTIFY_REPLY;
1713         req->in.h.nodeid = outarg->nodeid;
1714         req->in.numargs = 2;
1715         req->in.argpages = 1;
1716         req->page_descs[0].offset = offset;
1717         req->end = fuse_retrieve_end;
1718
1719         index = outarg->offset >> PAGE_CACHE_SHIFT;
1720
1721         while (num && req->num_pages < num_pages) {
1722                 struct page *page;
1723                 unsigned int this_num;
1724
1725                 page = find_get_page(mapping, index);
1726                 if (!page)
1727                         break;
1728
1729                 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1730                 req->pages[req->num_pages] = page;
1731                 req->page_descs[req->num_pages].length = this_num;
1732                 req->num_pages++;
1733
1734                 offset = 0;
1735                 num -= this_num;
1736                 total_len += this_num;
1737                 index++;
1738         }
1739         req->misc.retrieve_in.offset = outarg->offset;
1740         req->misc.retrieve_in.size = total_len;
1741         req->in.args[0].size = sizeof(req->misc.retrieve_in);
1742         req->in.args[0].value = &req->misc.retrieve_in;
1743         req->in.args[1].size = total_len;
1744
1745         err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1746         if (err)
1747                 fuse_retrieve_end(fc, req);
1748
1749         return err;
1750 }
1751
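/*
 * Handle FUSE_NOTIFY_RETRIEVE: look the inode up by nodeid and, if it
 * is still known, hand its cached data back via fuse_retrieve().
 */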
1752 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1753                                 struct fuse_copy_state *cs)
1754 {
1755         struct fuse_notify_retrieve_out outarg;
1756         struct inode *inode;
1757         int err;
1758
1759         err = -EINVAL;
1760         if (size != sizeof(outarg))
1761                 goto copy_finish;
1762
1763         err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1764         if (err)
1765                 goto copy_finish;
1766
1767         fuse_copy_finish(cs);
1768
1769         down_read(&fc->killsb);
1770         err = -ENOENT;
1771         if (fc->sb) {
1772                 u64 nodeid = outarg.nodeid;
1773
1774                 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1775                 if (inode) {
1776                         err = fuse_retrieve(fc, inode, &outarg);
1777                         iput(inode);
1778                 }
1779         }
1780         up_read(&fc->killsb);
1781
1782         return err;
1783
1784 copy_finish:
1785         fuse_copy_finish(cs);
1786         return err;
1787 }
1788
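/*
 * Dispatch an unsolicited notification to its handler.  The caller has
 * already parsed the fuse_out_header: a zero unique ID marked the
 * message as a notification and the error field carried the code.
 */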
1789 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1790                        unsigned int size, struct fuse_copy_state *cs)
1791 {
1792         /* Don't try to move pages (yet) */
1793         cs->move_pages = 0;
1794
1795         switch (code) {
1796         case FUSE_NOTIFY_POLL:
1797                 return fuse_notify_poll(fc, size, cs);
1798
1799         case FUSE_NOTIFY_INVAL_INODE:
1800                 return fuse_notify_inval_inode(fc, size, cs);
1801
1802         case FUSE_NOTIFY_INVAL_ENTRY:
1803                 return fuse_notify_inval_entry(fc, size, cs);
1804
1805         case FUSE_NOTIFY_STORE:
1806                 return fuse_notify_store(fc, size, cs);
1807
1808         case FUSE_NOTIFY_RETRIEVE:
1809                 return fuse_notify_retrieve(fc, size, cs);
1810
1811         case FUSE_NOTIFY_DELETE:
1812                 return fuse_notify_delete(fc, size, cs);
1813
1814         default:
1815                 fuse_copy_finish(cs);
1816                 return -EINVAL;
1817         }
1818 }
1819
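/*
 * Illustrative only -- not kernel code.  A daemon emits one of the above
 * notifications by writing a fuse_out_header with unique == 0 and the
 * notify code in the error field, followed by the code-specific payload.
 * A minimal userspace sketch for FUSE_NOTIFY_DELETE, where fuse_fd,
 * parent_nodeid and child_nodeid are hypothetical, and <string.h>,
 * <sys/uio.h> and <linux/fuse.h> are assumed to be included:
 *
 *	const char *name = "victim";          // hypothetical dentry name
 *	struct fuse_out_header oh;
 *	struct fuse_notify_delete_out arg = {
 *		.parent  = parent_nodeid,
 *		.child   = child_nodeid,
 *		.namelen = strlen(name),
 *	};
 *	struct iovec iov[] = {
 *		{ &oh, sizeof(oh) },
 *		{ &arg, sizeof(arg) },
 *		{ (void *)name, arg.namelen + 1 },  // trailing NUL included
 *	};
 *
 *	oh.unique = 0;                        // zero marks a notification
 *	oh.error  = FUSE_NOTIFY_DELETE;       // notify code, not an errno
 *	oh.len    = sizeof(oh) + sizeof(arg) + arg.namelen + 1;
 *	writev(fuse_fd, iov, 3);
 */
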
1820 /* Look up request on processing list by unique ID */
1821 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1822 {
1823         struct fuse_req *req;
1824
1825         list_for_each_entry(req, &fc->processing, list) {
1826                 if (req->in.h.unique == unique || req->intr_unique == unique)
1827                         return req;
1828         }
1829         return NULL;
1830 }
1831
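/*
 * Copy the argument part of a reply into the request.  An error reply
 * must consist of the header alone.  A last argument shorter than
 * declared is tolerated only when out->argvar is set (variable length
 * reply); any other size mismatch is rejected with -EINVAL.
 */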
1832 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1833                          unsigned nbytes)
1834 {
1835         unsigned reqsize = sizeof(struct fuse_out_header);
1836
1837         if (out->h.error)
1838                 return nbytes != reqsize ? -EINVAL : 0;
1839
1840         reqsize += len_args(out->numargs, out->args);
1841
1842         if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1843                 return -EINVAL;
1844         else if (reqsize > nbytes) {
1845                 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1846                 unsigned diffsize = reqsize - nbytes;
1847                 if (diffsize > lastarg->size)
1848                         return -EINVAL;
1849                 lastarg->size -= diffsize;
1850         }
1851         return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1852                               out->page_zeroing);
1853 }
1854
1855 /*
1856  * Write a single reply to a request.  First the header is copied from
1857  * the write buffer.  The matching request is then looked up on the
1858  * processing list by the unique ID found in the header.  If found, it
1859  * is removed from the list and the rest of the buffer is copied into
1860  * it.  The request is finished by calling request_end().
1861  */
1862 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1863                                  struct fuse_copy_state *cs, size_t nbytes)
1864 {
1865         int err;
1866         struct fuse_req *req;
1867         struct fuse_out_header oh;
1868
1869         if (nbytes < sizeof(struct fuse_out_header))
1870                 return -EINVAL;
1871
1872         err = fuse_copy_one(cs, &oh, sizeof(oh));
1873         if (err)
1874                 goto err_finish;
1875
1876         err = -EINVAL;
1877         if (oh.len != nbytes)
1878                 goto err_finish;
1879
1880         /*
1881          * Zero oh.unique indicates unsolicited notification message
1882          * and error contains notification code.
1883          */
1884         if (!oh.unique) {
1885                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1886                 return err ? err : nbytes;
1887         }
1888
1889         err = -EINVAL;
1890         if (oh.error <= -1000 || oh.error > 0)
1891                 goto err_finish;
1892
1893         spin_lock(&fc->lock);
1894         err = -ENOENT;
1895         if (!fc->connected)
1896                 goto err_unlock;
1897
1898         req = request_find(fc, oh.unique);
1899         if (!req)
1900                 goto err_unlock;
1901
1902         /* Is it an interrupt reply? */
1903         if (req->intr_unique == oh.unique) {
1904                 err = -EINVAL;
1905                 if (nbytes != sizeof(struct fuse_out_header))
1906                         goto err_unlock;
1907
1908                 if (oh.error == -ENOSYS)
1909                         fc->no_interrupt = 1;
1910                 else if (oh.error == -EAGAIN)
1911                         queue_interrupt(fc, req);
1912
1913                 spin_unlock(&fc->lock);
1914                 fuse_copy_finish(cs);
1915                 return nbytes;
1916         }
1917
1918         req->state = FUSE_REQ_WRITING;
1919         list_move(&req->list, &fc->io);
1920         req->out.h = oh;
1921         set_bit(FR_LOCKED, &req->flags);
1922         cs->req = req;
1923         if (!req->out.page_replace)
1924                 cs->move_pages = 0;
1925         spin_unlock(&fc->lock);
1926
1927         err = copy_out_args(cs, &req->out, nbytes);
1928         fuse_copy_finish(cs);
1929
1930         spin_lock(&fc->lock);
1931         clear_bit(FR_LOCKED, &req->flags);
1932         if (!fc->connected)
1933                 err = -ENOENT;
1934         else if (err)
1935                 req->out.h.error = -EIO;
1936         request_end(fc, req);
1937
1938         return err ? err : nbytes;
1939
1940  err_unlock:
1941         spin_unlock(&fc->lock);
1942  err_finish:
1943         fuse_copy_finish(cs);
1944         return err;
1945 }
1946
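/*
 * Illustrative only -- not kernel code.  The simplest well-formed reply
 * a daemon can write is a bare header carrying an error, e.g. failing a
 * request with -ENOENT (a sketch; fuse_fd and request_unique are
 * hypothetical, the latter copied from the request's fuse_in_header):
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh),     // header only: no argument payload
 *		.error  = -ENOENT,        // 0, or a negative errno > -1000
 *		.unique = request_unique, // matches the pending request
 *	};
 *
 *	write(fuse_fd, &oh, sizeof(oh));
 */

/* write_iter entry point for /dev/fuse: replies (and notifications) land here. */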
1947 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1948 {
1949         struct fuse_copy_state cs;
1950         struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1951         if (!fc)
1952                 return -EPERM;
1953
1954         if (!iter_is_iovec(from))
1955                 return -EINVAL;
1956
1957         fuse_copy_init(&cs, 0, from);
1958
1959         return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
1960 }
1961
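/*
 * splice(2) into /dev/fuse: detach the needed pipe buffers under
 * pipe_lock() and feed them to fuse_dev_do_write().  With SPLICE_F_MOVE
 * the pages may be moved into the page cache instead of copied, when
 * the request allows page replacement.
 */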
1962 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1963                                      struct file *out, loff_t *ppos,
1964                                      size_t len, unsigned int flags)
1965 {
1966         unsigned nbuf;
1967         unsigned idx;
1968         struct pipe_buffer *bufs;
1969         struct fuse_copy_state cs;
1970         struct fuse_conn *fc;
1971         size_t rem;
1972         ssize_t ret;
1973
1974         fc = fuse_get_conn(out);
1975         if (!fc)
1976                 return -EPERM;
1977
1978         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1979         if (!bufs)
1980                 return -ENOMEM;
1981
1982         pipe_lock(pipe);
1983         nbuf = 0;
1984         rem = 0;
1985         for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1986                 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1987
1988         ret = -EINVAL;
1989         if (rem < len) {
1990                 pipe_unlock(pipe);
1991                 goto out;
1992         }
1993
1994         rem = len;
1995         while (rem) {
1996                 struct pipe_buffer *ibuf;
1997                 struct pipe_buffer *obuf;
1998
1999                 BUG_ON(nbuf >= pipe->buffers);
2000                 BUG_ON(!pipe->nrbufs);
2001                 ibuf = &pipe->bufs[pipe->curbuf];
2002                 obuf = &bufs[nbuf];
2003
2004                 if (rem >= ibuf->len) {
2005                         *obuf = *ibuf;
2006                         ibuf->ops = NULL;
2007                         pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2008                         pipe->nrbufs--;
2009                 } else {
2010                         ibuf->ops->get(pipe, ibuf);
2011                         *obuf = *ibuf;
2012                         obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2013                         obuf->len = rem;
2014                         ibuf->offset += obuf->len;
2015                         ibuf->len -= obuf->len;
2016                 }
2017                 nbuf++;
2018                 rem -= obuf->len;
2019         }
2020         pipe_unlock(pipe);
2021
2022         fuse_copy_init(&cs, 0, NULL);
2023         cs.pipebufs = bufs;
2024         cs.nr_segs = nbuf;
2025         cs.pipe = pipe;
2026
2027         if (flags & SPLICE_F_MOVE)
2028                 cs.move_pages = 1;
2029
2030         ret = fuse_dev_do_write(fc, &cs, len);
2031
2032         for (idx = 0; idx < nbuf; idx++) {
2033                 struct pipe_buffer *buf = &bufs[idx];
2034                 buf->ops->release(pipe, buf);
2035         }
2036 out:
2037         kfree(bufs);
2038         return ret;
2039 }
2040
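/*
 * Poll for /dev/fuse: the device is always writable; it becomes
 * readable when something is queued for the daemon, and signals
 * POLLERR once the connection is gone.
 */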
2041 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2042 {
2043         unsigned mask = POLLOUT | POLLWRNORM;
2044         struct fuse_conn *fc = fuse_get_conn(file);
2045         if (!fc)
2046                 return POLLERR;
2047
2048         poll_wait(file, &fc->waitq, wait);
2049
2050         spin_lock(&fc->lock);
2051         if (!fc->connected)
2052                 mask = POLLERR;
2053         else if (request_pending(fc))
2054                 mask |= POLLIN | POLLRDNORM;
2055         spin_unlock(&fc->lock);
2056
2057         return mask;
2058 }
2059
2060 /*
2061  * Abort all requests on the given list (pending or processing)
2062  *
2063  * This function releases and reacquires fc->lock
2064  */
2065 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2066 __releases(fc->lock)
2067 __acquires(fc->lock)
2068 {
2069         while (!list_empty(head)) {
2070                 struct fuse_req *req;
2071                 req = list_entry(head->next, struct fuse_req, list);
2072                 req->out.h.error = -ECONNABORTED;
2073                 request_end(fc, req);
2074                 spin_lock(&fc->lock);
2075         }
2076 }
2077
2078 /*
2079  * Abort requests under I/O
2080  *
2081  * Separate out unlocked requests; they should be finished off immediately.
2082  * Locked requests will be finished after unlock; see unlock_request().
2083  *
2084  * Next finish off the unlocked requests.  It is possible that some request will
2085  * finish before we can.  This is OK, the request will in that case be removed
2086  * from the list before we touch it.
2087  */
2088 static void end_io_requests(struct fuse_conn *fc)
2089 __releases(fc->lock)
2090 __acquires(fc->lock)
2091 {
2092         struct fuse_req *req, *next;
2093         LIST_HEAD(to_end);
2094
2095         list_for_each_entry_safe(req, next, &fc->io, list) {
2096                 req->out.h.error = -ECONNABORTED;
2097                 spin_lock(&req->waitq.lock);
2098                 set_bit(FR_ABORTED, &req->flags);
2099                 if (!test_bit(FR_LOCKED, &req->flags))
2100                         list_move(&req->list, &to_end);
2101                 spin_unlock(&req->waitq.lock);
2102         }
2103         while (!list_empty(&to_end)) {
2104                 req = list_first_entry(&to_end, struct fuse_req, list);
2105                 __fuse_get_request(req);
2106                 request_end(fc, req);
2107                 spin_lock(&fc->lock);
2108         }
2109 }
2110
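/*
 * Fail everything that is queued but not under I/O: lift the background
 * limit so flush_bg_queue() moves all backgrounded requests to pending,
 * end the pending and processing lists, then drop queued forgets.
 */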
2111 static void end_queued_requests(struct fuse_conn *fc)
2112 __releases(fc->lock)
2113 __acquires(fc->lock)
2114 {
2115         fc->max_background = UINT_MAX;
2116         flush_bg_queue(fc);
2117         end_requests(fc, &fc->pending);
2118         end_requests(fc, &fc->processing);
2119         while (forget_pending(fc))
2120                 kfree(dequeue_forget(fc, 1, NULL));
2121 }
2122
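/*
 * Wake up all waiters on files polled through this connection, so that
 * they notice the connection is dead.
 */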
2123 static void end_polls(struct fuse_conn *fc)
2124 {
2125         struct rb_node *p;
2126
2127         p = rb_first(&fc->polled_files);
2128
2129         while (p) {
2130                 struct fuse_file *ff;
2131                 ff = rb_entry(p, struct fuse_file, polled_node);
2132                 wake_up_interruptible_all(&ff->poll_wait);
2133
2134                 p = rb_next(p);
2135         }
2136 }
2137
2138 /*
2139  * Abort all requests.
2140  *
2141  * Emergency exit in case of a malicious or accidental deadlock, or
2142  * just a hung filesystem.
2143  *
2144  * The same effect is usually achievable through killing the
2145  * filesystem daemon and all users of the filesystem.  The exception
2146  * is the combination of an asynchronous request and the tricky
2147  * deadlock (see Documentation/filesystems/fuse.txt).
2148  *
2149  * Request progression from one list to the next is prevented by
2150  * fc->connected being false.
2151  */
2152 void fuse_abort_conn(struct fuse_conn *fc)
2153 {
2154         spin_lock(&fc->lock);
2155         if (fc->connected) {
2156                 fc->connected = 0;
2157                 fc->blocked = 0;
2158                 fuse_set_initialized(fc);
2159                 end_io_requests(fc);
2160                 end_queued_requests(fc);
2161                 end_polls(fc);
2162                 wake_up_all(&fc->waitq);
2163                 wake_up_all(&fc->blocked_waitq);
2164                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
2165         }
2166         spin_unlock(&fc->lock);
2167 }
2168 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2169
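/*
 * Final close of the device file: abort anything still outstanding and
 * drop the file's reference on the connection.
 */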
2170 int fuse_dev_release(struct inode *inode, struct file *file)
2171 {
2172         struct fuse_conn *fc = fuse_get_conn(file);
2173         if (fc) {
2174                 WARN_ON(!list_empty(&fc->io));
2175                 WARN_ON(fc->fasync != NULL);
2176                 fuse_abort_conn(fc);
2177                 fuse_conn_put(fc);
2178         }
2179
2180         return 0;
2181 }
2182 EXPORT_SYMBOL_GPL(fuse_dev_release);
2183
2184 static int fuse_dev_fasync(int fd, struct file *file, int on)
2185 {
2186         struct fuse_conn *fc = fuse_get_conn(file);
2187         if (!fc)
2188                 return -EPERM;
2189
2190         /* No locking - fasync_helper does its own locking */
2191         return fasync_helper(fd, file, on, &fc->fasync);
2192 }
2193
2194 const struct file_operations fuse_dev_operations = {
2195         .owner          = THIS_MODULE,
2196         .open           = fuse_dev_open,
2197         .llseek         = no_llseek,
2198         .read_iter      = fuse_dev_read,
2199         .splice_read    = fuse_dev_splice_read,
2200         .write_iter     = fuse_dev_write,
2201         .splice_write   = fuse_dev_splice_write,
2202         .poll           = fuse_dev_poll,
2203         .release        = fuse_dev_release,
2204         .fasync         = fuse_dev_fasync,
2205 };
2206 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2207
2208 static struct miscdevice fuse_miscdevice = {
2209         .minor = FUSE_MINOR,
2210         .name  = "fuse",
2211         .fops = &fuse_dev_operations,
2212 };
2213
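/*
 * Module init: create the slab cache for requests, then register the
 * misc device that userspace opens as /dev/fuse.
 */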
2214 int __init fuse_dev_init(void)
2215 {
2216         int err = -ENOMEM;
2217         fuse_req_cachep = kmem_cache_create("fuse_request",
2218                                             sizeof(struct fuse_req),
2219                                             0, 0, NULL);
2220         if (!fuse_req_cachep)
2221                 goto out;
2222
2223         err = misc_register(&fuse_miscdevice);
2224         if (err)
2225                 goto out_cache_clean;
2226
2227         return 0;
2228
2229  out_cache_clean:
2230         kmem_cache_destroy(fuse_req_cachep);
2231  out:
2232         return err;
2233 }
2234
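/* Module exit: tear down in the reverse order of fuse_dev_init(). */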
2235 void fuse_dev_cleanup(void)
2236 {
2237         misc_deregister(&fuse_miscdevice);
2238         kmem_cache_destroy(fuse_req_cachep);
2239 }