334f485d
MS
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <[email protected]>
334f485d
MS
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
dd3bb14f 19#include <linux/pipe_fs_i.h>
ce534fb0
MS
20#include <linux/swap.h>
21#include <linux/splice.h>
334f485d
MS
22
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
578454ff 24MODULE_ALIAS("devname:fuse");
334f485d 25
e18b890b 26static struct kmem_cache *fuse_req_cachep;
334f485d 27
8bfc016d 28static struct fuse_conn *fuse_get_conn(struct file *file)
334f485d 29{
0720b315
MS
30 /*
31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
34 return file->private_data;
334f485d
MS
35}
36
4250c066 37static void fuse_request_init(struct fuse_req *req, struct page **pages,
b2430d75 38 struct fuse_page_desc *page_descs,
4250c066 39 unsigned npages)
334f485d
MS
40{
41 memset(req, 0, sizeof(*req));
4250c066 42 memset(pages, 0, sizeof(*pages) * npages);
b2430d75 43 memset(page_descs, 0, sizeof(*page_descs) * npages);
334f485d 44 INIT_LIST_HEAD(&req->list);
a4d27e75 45 INIT_LIST_HEAD(&req->intr_entry);
334f485d
MS
46 init_waitqueue_head(&req->waitq);
47 atomic_set(&req->count, 1);
4250c066 48 req->pages = pages;
b2430d75 49 req->page_descs = page_descs;
4250c066 50 req->max_pages = npages;
334f485d
MS
51}
52
4250c066 53static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
334f485d 54{
4250c066
MP
55 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
56 if (req) {
57 struct page **pages;
b2430d75 58 struct fuse_page_desc *page_descs;
4250c066 59
b2430d75 60 if (npages <= FUSE_REQ_INLINE_PAGES) {
4250c066 61 pages = req->inline_pages;
b2430d75
MP
62 page_descs = req->inline_page_descs;
63 } else {
4250c066 64 pages = kmalloc(sizeof(struct page *) * npages, flags);
b2430d75
MP
65 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
66 npages, flags);
67 }
4250c066 68
b2430d75
MP
69 if (!pages || !page_descs) {
70 kfree(pages);
71 kfree(page_descs);
4250c066
MP
72 kmem_cache_free(fuse_req_cachep, req);
73 return NULL;
74 }
75
b2430d75 76 fuse_request_init(req, pages, page_descs, npages);
4250c066 77 }
334f485d
MS
78 return req;
79}
4250c066
MP
80
81struct fuse_req *fuse_request_alloc(unsigned npages)
82{
83 return __fuse_request_alloc(npages, GFP_KERNEL);
84}
08cbf542 85EXPORT_SYMBOL_GPL(fuse_request_alloc);
334f485d 86
4250c066 87struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
3be5a52b 88{
4250c066 89 return __fuse_request_alloc(npages, GFP_NOFS);
3be5a52b
MS
90}
91
334f485d
MS
92void fuse_request_free(struct fuse_req *req)
93{
b2430d75 94 if (req->pages != req->inline_pages) {
4250c066 95 kfree(req->pages);
b2430d75
MP
96 kfree(req->page_descs);
97 }
334f485d
MS
98 kmem_cache_free(fuse_req_cachep, req);
99}
100
8bfc016d 101static void block_sigs(sigset_t *oldset)
334f485d
MS
102{
103 sigset_t mask;
104
105 siginitsetinv(&mask, sigmask(SIGKILL));
106 sigprocmask(SIG_BLOCK, &mask, oldset);
107}
108
8bfc016d 109static void restore_sigs(sigset_t *oldset)
334f485d
MS
110{
111 sigprocmask(SIG_SETMASK, oldset, NULL);
112}
113
36cf66ed 114void __fuse_get_request(struct fuse_req *req)
334f485d
MS
115{
116 atomic_inc(&req->count);
117}
118
119/* Must be called with > 1 refcount */
120static void __fuse_put_request(struct fuse_req *req)
121{
122 BUG_ON(atomic_read(&req->count) < 2);
123 atomic_dec(&req->count);
124}
125
33649c91
MS
126static void fuse_req_init_context(struct fuse_req *req)
127{
499dcf20
EB
128 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
129 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
33649c91
MS
130 req->in.h.pid = current->pid;
131}
132
9759bd51
MS
133void fuse_set_initialized(struct fuse_conn *fc)
134{
135 /* Make sure stores before this are seen on another CPU */
136 smp_wmb();
137 fc->initialized = 1;
138}
139
0aada884
MP
140static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
141{
142 return !fc->initialized || (for_background && fc->blocked);
143}
144
8b41e671
MP
145static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
146 bool for_background)
334f485d 147{
08a53cdc 148 struct fuse_req *req;
08a53cdc 149 int err;
9bc5ddda 150 atomic_inc(&fc->num_waiting);
0aada884
MP
151
152 if (fuse_block_alloc(fc, for_background)) {
153 sigset_t oldset;
154 int intr;
155
156 block_sigs(&oldset);
722d2bea 157 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
0aada884
MP
158 !fuse_block_alloc(fc, for_background));
159 restore_sigs(&oldset);
160 err = -EINTR;
161 if (intr)
162 goto out;
163 }
9759bd51
MS
164 /* Matches smp_wmb() in fuse_set_initialized() */
165 smp_rmb();
08a53cdc 166
51eb01e7
MS
167 err = -ENOTCONN;
168 if (!fc->connected)
169 goto out;
170
de155226
MS
171 err = -ECONNREFUSED;
172 if (fc->conn_error)
173 goto out;
174
b111c8c0 175 req = fuse_request_alloc(npages);
9bc5ddda 176 err = -ENOMEM;
722d2bea
MP
177 if (!req) {
178 if (for_background)
179 wake_up(&fc->blocked_waitq);
9bc5ddda 180 goto out;
722d2bea 181 }
334f485d 182
33649c91 183 fuse_req_init_context(req);
825d6d33
MS
184 __set_bit(FR_WAITING, &req->flags);
185 if (for_background)
186 __set_bit(FR_BACKGROUND, &req->flags);
187
334f485d 188 return req;
9bc5ddda
MS
189
190 out:
191 atomic_dec(&fc->num_waiting);
192 return ERR_PTR(err);
334f485d 193}
8b41e671
MP
194
195struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
196{
197 return __fuse_get_req(fc, npages, false);
198}
08cbf542 199EXPORT_SYMBOL_GPL(fuse_get_req);
334f485d 200
8b41e671
MP
201struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
202 unsigned npages)
203{
204 return __fuse_get_req(fc, npages, true);
205}
206EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
207
33649c91
MS
208/*
209 * Return request in fuse_file->reserved_req. However, that may
210 * currently be in use. If that is the case, wait for it to become
211 * available.
212 */
213static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
214 struct file *file)
215{
216 struct fuse_req *req = NULL;
217 struct fuse_file *ff = file->private_data;
218
219 do {
de5e3dec 220 wait_event(fc->reserved_req_waitq, ff->reserved_req);
33649c91
MS
221 spin_lock(&fc->lock);
222 if (ff->reserved_req) {
223 req = ff->reserved_req;
224 ff->reserved_req = NULL;
cb0942b8 225 req->stolen_file = get_file(file);
33649c91
MS
226 }
227 spin_unlock(&fc->lock);
228 } while (!req);
229
230 return req;
231}
232
233/*
234 * Put stolen request back into fuse_file->reserved_req
235 */
236static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
237{
238 struct file *file = req->stolen_file;
239 struct fuse_file *ff = file->private_data;
240
241 spin_lock(&fc->lock);
b2430d75 242 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
33649c91
MS
243 BUG_ON(ff->reserved_req);
244 ff->reserved_req = req;
de5e3dec 245 wake_up_all(&fc->reserved_req_waitq);
33649c91
MS
246 spin_unlock(&fc->lock);
247 fput(file);
248}
249
250/*
251 * Gets a request for a file operation, always succeeds
252 *
253 * This is used for sending the FLUSH request, which must get to
254 * userspace, due to POSIX locks which may need to be unlocked.
255 *
256 * If allocation fails due to OOM, use the reserved request in
257 * fuse_file.
258 *
259 * This is very unlikely to deadlock accidentally, since the
260 * filesystem should not have its own file open. If deadlock is
261 * intentional, it can still be broken by "aborting" the filesystem.
262 */
b111c8c0
MP
263struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
264 struct file *file)
33649c91
MS
265{
266 struct fuse_req *req;
267
268 atomic_inc(&fc->num_waiting);
0aada884 269 wait_event(fc->blocked_waitq, fc->initialized);
9759bd51
MS
270 /* Matches smp_wmb() in fuse_set_initialized() */
271 smp_rmb();
b111c8c0 272 req = fuse_request_alloc(0);
33649c91
MS
273 if (!req)
274 req = get_reserved_req(fc, file);
275
276 fuse_req_init_context(req);
825d6d33
MS
277 __set_bit(FR_WAITING, &req->flags);
278 __clear_bit(FR_BACKGROUND, &req->flags);
33649c91
MS
279 return req;
280}
281
334f485d 282void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
7128ec2a
MS
283{
284 if (atomic_dec_and_test(&req->count)) {
825d6d33 285 if (test_bit(FR_BACKGROUND, &req->flags)) {
722d2bea
MP
286 /*
287 * We get here in the unlikely case that a background
288 * request was allocated but not sent
289 */
290 spin_lock(&fc->lock);
291 if (!fc->blocked)
292 wake_up(&fc->blocked_waitq);
293 spin_unlock(&fc->lock);
294 }
295
825d6d33
MS
296 if (test_bit(FR_WAITING, &req->flags)) {
297 __clear_bit(FR_WAITING, &req->flags);
9bc5ddda 298 atomic_dec(&fc->num_waiting);
73e0e738 299 }
33649c91
MS
300
301 if (req->stolen_file)
302 put_reserved_req(fc, req);
303 else
304 fuse_request_free(req);
7128ec2a
MS
305 }
306}
08cbf542 307EXPORT_SYMBOL_GPL(fuse_put_request);
7128ec2a 308
d12def1b
MS
309static unsigned len_args(unsigned numargs, struct fuse_arg *args)
310{
311 unsigned nbytes = 0;
312 unsigned i;
313
314 for (i = 0; i < numargs; i++)
315 nbytes += args[i].size;
316
317 return nbytes;
318}
319
320static u64 fuse_get_unique(struct fuse_conn *fc)
321{
322 fc->reqctr++;
323 /* zero is special */
324 if (fc->reqctr == 0)
325 fc->reqctr = 1;
326
327 return fc->reqctr;
328}
329
330static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
331{
d12def1b
MS
332 req->in.h.len = sizeof(struct fuse_in_header) +
333 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
334 list_add_tail(&req->list, &fc->pending);
335 req->state = FUSE_REQ_PENDING;
d12def1b
MS
336 wake_up(&fc->waitq);
337 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
338}
339
07e77dca
MS
340void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
341 u64 nodeid, u64 nlookup)
342{
02c048b9
MS
343 forget->forget_one.nodeid = nodeid;
344 forget->forget_one.nlookup = nlookup;
07e77dca
MS
345
346 spin_lock(&fc->lock);
5dfcc87f
MS
347 if (fc->connected) {
348 fc->forget_list_tail->next = forget;
349 fc->forget_list_tail = forget;
350 wake_up(&fc->waitq);
351 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
352 } else {
353 kfree(forget);
354 }
07e77dca
MS
355 spin_unlock(&fc->lock);
356}
357
d12def1b
MS
358static void flush_bg_queue(struct fuse_conn *fc)
359{
7a6d3c8b 360 while (fc->active_background < fc->max_background &&
d12def1b
MS
361 !list_empty(&fc->bg_queue)) {
362 struct fuse_req *req;
363
364 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
365 list_del(&req->list);
366 fc->active_background++;
2d45ba38 367 req->in.h.unique = fuse_get_unique(fc);
d12def1b
MS
368 queue_request(fc, req);
369 }
370}
371
334f485d
MS
372/*
373 * This function is called when a request is finished. Either a reply
f9a2842e 374 * has arrived or it was aborted (and not yet sent) or some error
f43b155a 375 * occurred during communication with userspace, or the device file
51eb01e7
MS
376 * was closed. The requester thread is woken up (if still waiting),
377 * the 'end' callback is called if given, else the reference to the
378 * request is released
7128ec2a 379 *
d7133114 380 * Called with fc->lock, unlocks it
334f485d
MS
381 */
382static void request_end(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2 383__releases(fc->lock)
334f485d 384{
51eb01e7
MS
385 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
386 req->end = NULL;
0d8e84b0
MS
387 list_del_init(&req->list);
388 list_del_init(&req->intr_entry);
83cfd493 389 req->state = FUSE_REQ_FINISHED;
825d6d33
MS
390 if (test_bit(FR_BACKGROUND, &req->flags)) {
391 clear_bit(FR_BACKGROUND, &req->flags);
722d2bea 392 if (fc->num_background == fc->max_background)
51eb01e7 393 fc->blocked = 0;
722d2bea
MP
394
395 /* Wake up next waiter, if any */
3c18ef81 396 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
722d2bea
MP
397 wake_up(&fc->blocked_waitq);
398
7a6d3c8b 399 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 400 fc->connected && fc->bdi_initialized) {
8aa7e847
JA
401 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
402 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
f92b99b9 403 }
51eb01e7 404 fc->num_background--;
d12def1b
MS
405 fc->active_background--;
406 flush_bg_queue(fc);
334f485d 407 }
51eb01e7 408 spin_unlock(&fc->lock);
51eb01e7
MS
409 wake_up(&req->waitq);
410 if (end)
411 end(fc, req);
e9bb09dd 412 fuse_put_request(fc, req);
334f485d
MS
413}
414
a4d27e75
MS
415static void wait_answer_interruptible(struct fuse_conn *fc,
416 struct fuse_req *req)
b9ca67b2
MS
417__releases(fc->lock)
418__acquires(fc->lock)
a4d27e75
MS
419{
420 if (signal_pending(current))
421 return;
422
423 spin_unlock(&fc->lock);
424 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
425 spin_lock(&fc->lock);
426}
427
428static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
429{
430 list_add_tail(&req->intr_entry, &fc->interrupts);
431 wake_up(&fc->waitq);
432 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
433}
434
7c352bdf 435static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2
MS
436__releases(fc->lock)
437__acquires(fc->lock)
334f485d 438{
a4d27e75
MS
439 if (!fc->no_interrupt) {
440 /* Any signal may interrupt this */
441 wait_answer_interruptible(fc, req);
334f485d 442
a4d27e75
MS
443 if (req->state == FUSE_REQ_FINISHED)
444 return;
445
825d6d33 446 set_bit(FR_INTERRUPTED, &req->flags);
a4d27e75
MS
447 if (req->state == FUSE_REQ_SENT)
448 queue_interrupt(fc, req);
449 }
450
825d6d33 451 if (!test_bit(FR_FORCE, &req->flags)) {
a4d27e75
MS
452 sigset_t oldset;
453
454 /* Only fatal signals may interrupt this */
51eb01e7 455 block_sigs(&oldset);
a4d27e75 456 wait_answer_interruptible(fc, req);
51eb01e7 457 restore_sigs(&oldset);
a131de0a 458
a131de0a
MS
459 if (req->state == FUSE_REQ_FINISHED)
460 return;
461
462 /* Request is not yet in userspace, bail out */
463 if (req->state == FUSE_REQ_PENDING) {
464 list_del(&req->list);
465 __fuse_put_request(req);
466 req->out.h.error = -EINTR;
467 return;
468 }
51eb01e7 469 }
334f485d 470
a131de0a
MS
471 /*
472 * Either request is already in userspace, or it was forced.
473 * Wait it out.
474 */
475 spin_unlock(&fc->lock);
476 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
477 spin_lock(&fc->lock);
334f485d
MS
478}
479
6a4e922c 480static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
334f485d 481{
825d6d33 482 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
d7133114 483 spin_lock(&fc->lock);
1e9a4ed9 484 if (!fc->connected)
334f485d 485 req->out.h.error = -ENOTCONN;
334f485d 486 else {
2d45ba38 487 req->in.h.unique = fuse_get_unique(fc);
334f485d
MS
488 queue_request(fc, req);
489 /* acquire extra reference, since request is still needed
490 after request_end() */
491 __fuse_get_request(req);
492
7c352bdf 493 request_wait_answer(fc, req);
334f485d 494 }
d7133114 495 spin_unlock(&fc->lock);
334f485d 496}
6a4e922c
EW
497
498void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
499{
825d6d33
MS
500 __set_bit(FR_ISREPLY, &req->flags);
501 if (!test_bit(FR_WAITING, &req->flags)) {
502 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
503 atomic_inc(&fc->num_waiting);
504 }
6a4e922c
EW
505 __fuse_request_send(fc, req);
506}
08cbf542 507EXPORT_SYMBOL_GPL(fuse_request_send);
334f485d 508
21f62174
MS
509static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
510{
511 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
512 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
513
514 if (fc->minor < 9) {
515 switch (args->in.h.opcode) {
516 case FUSE_LOOKUP:
517 case FUSE_CREATE:
518 case FUSE_MKNOD:
519 case FUSE_MKDIR:
520 case FUSE_SYMLINK:
521 case FUSE_LINK:
522 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
523 break;
524 case FUSE_GETATTR:
525 case FUSE_SETATTR:
526 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
527 break;
528 }
529 }
530 if (fc->minor < 12) {
531 switch (args->in.h.opcode) {
532 case FUSE_CREATE:
533 args->in.args[0].size = sizeof(struct fuse_open_in);
534 break;
535 case FUSE_MKNOD:
536 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
537 break;
538 }
539 }
540}
541
7078187a
MS
542ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
543{
544 struct fuse_req *req;
545 ssize_t ret;
546
547 req = fuse_get_req(fc, 0);
548 if (IS_ERR(req))
549 return PTR_ERR(req);
550
21f62174
MS
551 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
552 fuse_adjust_compat(fc, args);
553
7078187a
MS
554 req->in.h.opcode = args->in.h.opcode;
555 req->in.h.nodeid = args->in.h.nodeid;
556 req->in.numargs = args->in.numargs;
557 memcpy(req->in.args, args->in.args,
558 args->in.numargs * sizeof(struct fuse_in_arg));
559 req->out.argvar = args->out.argvar;
560 req->out.numargs = args->out.numargs;
561 memcpy(req->out.args, args->out.args,
562 args->out.numargs * sizeof(struct fuse_arg));
563 fuse_request_send(fc, req);
564 ret = req->out.h.error;
565 if (!ret && args->out.argvar) {
566 BUG_ON(args->out.numargs != 1);
567 ret = req->out.args[0].size;
568 }
569 fuse_put_request(fc, req);
570
571 return ret;
572}
573
f0139aa8
MS
574/*
575 * Called under fc->lock
576 *
577 * fc->connected must have been checked previously
578 */
579void fuse_request_send_background_locked(struct fuse_conn *fc,
580 struct fuse_req *req)
d12def1b 581{
825d6d33
MS
582 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
583 if (!test_bit(FR_WAITING, &req->flags)) {
584 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
585 atomic_inc(&fc->num_waiting);
586 }
825d6d33 587 __set_bit(FR_ISREPLY, &req->flags);
d12def1b 588 fc->num_background++;
7a6d3c8b 589 if (fc->num_background == fc->max_background)
d12def1b 590 fc->blocked = 1;
7a6d3c8b 591 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 592 fc->bdi_initialized) {
8aa7e847
JA
593 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
594 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
d12def1b
MS
595 }
596 list_add_tail(&req->list, &fc->bg_queue);
597 flush_bg_queue(fc);
598}
599
f0139aa8 600void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
334f485d 601{
42dc6211 602 BUG_ON(!req->end);
d7133114 603 spin_lock(&fc->lock);
1e9a4ed9 604 if (fc->connected) {
f0139aa8 605 fuse_request_send_background_locked(fc, req);
d7133114 606 spin_unlock(&fc->lock);
334f485d 607 } else {
42dc6211 608 spin_unlock(&fc->lock);
334f485d 609 req->out.h.error = -ENOTCONN;
42dc6211
MS
610 req->end(fc, req);
611 fuse_put_request(fc, req);
334f485d
MS
612 }
613}
08cbf542 614EXPORT_SYMBOL_GPL(fuse_request_send_background);
334f485d 615
2d45ba38
MS
616static int fuse_request_send_notify_reply(struct fuse_conn *fc,
617 struct fuse_req *req, u64 unique)
618{
619 int err = -ENODEV;
620
825d6d33 621 __clear_bit(FR_ISREPLY, &req->flags);
2d45ba38
MS
622 req->in.h.unique = unique;
623 spin_lock(&fc->lock);
624 if (fc->connected) {
625 queue_request(fc, req);
626 err = 0;
627 }
628 spin_unlock(&fc->lock);
629
630 return err;
631}
632
0b05b183
AA
633void fuse_force_forget(struct file *file, u64 nodeid)
634{
6131ffaa 635 struct inode *inode = file_inode(file);
0b05b183
AA
636 struct fuse_conn *fc = get_fuse_conn(inode);
637 struct fuse_req *req;
638 struct fuse_forget_in inarg;
639
640 memset(&inarg, 0, sizeof(inarg));
641 inarg.nlookup = 1;
b111c8c0 642 req = fuse_get_req_nofail_nopages(fc, file);
0b05b183
AA
643 req->in.h.opcode = FUSE_FORGET;
644 req->in.h.nodeid = nodeid;
645 req->in.numargs = 1;
646 req->in.args[0].size = sizeof(inarg);
647 req->in.args[0].value = &inarg;
825d6d33 648 __clear_bit(FR_ISREPLY, &req->flags);
6a4e922c
EW
649 __fuse_request_send(fc, req);
650 /* ignore errors */
651 fuse_put_request(fc, req);
0b05b183
AA
652}
653
334f485d
MS
654/*
655 * Lock the request. Up to the next unlock_request() there mustn't be
656 * anything that could cause a page-fault. If the request was already
f9a2842e 657 * aborted, bail out.
334f485d 658 */
d7133114 659static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
334f485d
MS
660{
661 int err = 0;
662 if (req) {
d7133114 663 spin_lock(&fc->lock);
825d6d33 664 if (test_bit(FR_ABORTED, &req->flags))
334f485d
MS
665 err = -ENOENT;
666 else
825d6d33 667 set_bit(FR_LOCKED, &req->flags);
d7133114 668 spin_unlock(&fc->lock);
334f485d
MS
669 }
670 return err;
671}
672
673/*
0d8e84b0
MS
674 * Unlock request. If it was aborted while locked, caller is responsible
675 * for unlocking and ending the request.
334f485d 676 */
0d8e84b0 677static int unlock_request(struct fuse_conn *fc, struct fuse_req *req)
334f485d 678{
0d8e84b0 679 int err = 0;
334f485d 680 if (req) {
d7133114 681 spin_lock(&fc->lock);
825d6d33 682 if (test_bit(FR_ABORTED, &req->flags))
0d8e84b0
MS
683 err = -ENOENT;
684 else
825d6d33 685 clear_bit(FR_LOCKED, &req->flags);
d7133114 686 spin_unlock(&fc->lock);
334f485d 687 }
0d8e84b0 688 return err;
334f485d
MS
689}
690
691struct fuse_copy_state {
d7133114 692 struct fuse_conn *fc;
334f485d
MS
693 int write;
694 struct fuse_req *req;
6c09e94a 695 struct iov_iter *iter;
dd3bb14f
MS
696 struct pipe_buffer *pipebufs;
697 struct pipe_buffer *currbuf;
698 struct pipe_inode_info *pipe;
334f485d 699 unsigned long nr_segs;
334f485d 700 struct page *pg;
334f485d 701 unsigned len;
c55a01d3 702 unsigned offset;
ce534fb0 703 unsigned move_pages:1;
334f485d
MS
704};
705
6c09e94a
AV
706static void fuse_copy_init(struct fuse_copy_state *cs,
707 struct fuse_conn *fc,
c3021629 708 int write,
6c09e94a 709 struct iov_iter *iter)
334f485d
MS
710{
711 memset(cs, 0, sizeof(*cs));
d7133114 712 cs->fc = fc;
334f485d 713 cs->write = write;
6c09e94a 714 cs->iter = iter;
334f485d
MS
715}
716
717/* Unmap and put previous page of userspace buffer */
8bfc016d 718static void fuse_copy_finish(struct fuse_copy_state *cs)
334f485d 719{
dd3bb14f
MS
720 if (cs->currbuf) {
721 struct pipe_buffer *buf = cs->currbuf;
722
c55a01d3 723 if (cs->write)
c3021629 724 buf->len = PAGE_SIZE - cs->len;
dd3bb14f 725 cs->currbuf = NULL;
c55a01d3 726 } else if (cs->pg) {
334f485d
MS
727 if (cs->write) {
728 flush_dcache_page(cs->pg);
729 set_page_dirty_lock(cs->pg);
730 }
731 put_page(cs->pg);
334f485d 732 }
c55a01d3 733 cs->pg = NULL;
334f485d
MS
734}
735
736/*
737 * Get another pageful of userspace buffer, and map it to kernel
738 * address space, and lock request
739 */
740static int fuse_copy_fill(struct fuse_copy_state *cs)
741{
c55a01d3 742 struct page *page;
334f485d
MS
743 int err;
744
0d8e84b0
MS
745 err = unlock_request(cs->fc, cs->req);
746 if (err)
747 return err;
748
334f485d 749 fuse_copy_finish(cs);
dd3bb14f
MS
750 if (cs->pipebufs) {
751 struct pipe_buffer *buf = cs->pipebufs;
752
c3021629
MS
753 if (!cs->write) {
754 err = buf->ops->confirm(cs->pipe, buf);
755 if (err)
756 return err;
757
758 BUG_ON(!cs->nr_segs);
759 cs->currbuf = buf;
c55a01d3
MS
760 cs->pg = buf->page;
761 cs->offset = buf->offset;
c3021629 762 cs->len = buf->len;
c3021629
MS
763 cs->pipebufs++;
764 cs->nr_segs--;
765 } else {
c3021629
MS
766 if (cs->nr_segs == cs->pipe->buffers)
767 return -EIO;
768
769 page = alloc_page(GFP_HIGHUSER);
770 if (!page)
771 return -ENOMEM;
772
773 buf->page = page;
774 buf->offset = 0;
775 buf->len = 0;
776
777 cs->currbuf = buf;
c55a01d3
MS
778 cs->pg = page;
779 cs->offset = 0;
c3021629
MS
780 cs->len = PAGE_SIZE;
781 cs->pipebufs++;
782 cs->nr_segs++;
783 }
dd3bb14f 784 } else {
6c09e94a
AV
785 size_t off;
786 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
dd3bb14f
MS
787 if (err < 0)
788 return err;
6c09e94a
AV
789 BUG_ON(!err);
790 cs->len = err;
791 cs->offset = off;
c55a01d3 792 cs->pg = page;
6c09e94a
AV
793 cs->offset = off;
794 iov_iter_advance(cs->iter, err);
334f485d 795 }
334f485d 796
d7133114 797 return lock_request(cs->fc, cs->req);
334f485d
MS
798}
799
800/* Do as much copy to/from userspace buffer as we can */
8bfc016d 801static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
334f485d
MS
802{
803 unsigned ncpy = min(*size, cs->len);
804 if (val) {
c55a01d3
MS
805 void *pgaddr = kmap_atomic(cs->pg);
806 void *buf = pgaddr + cs->offset;
807
334f485d 808 if (cs->write)
c55a01d3 809 memcpy(buf, *val, ncpy);
334f485d 810 else
c55a01d3
MS
811 memcpy(*val, buf, ncpy);
812
813 kunmap_atomic(pgaddr);
334f485d
MS
814 *val += ncpy;
815 }
816 *size -= ncpy;
817 cs->len -= ncpy;
c55a01d3 818 cs->offset += ncpy;
334f485d
MS
819 return ncpy;
820}
821
ce534fb0
MS
822static int fuse_check_page(struct page *page)
823{
824 if (page_mapcount(page) ||
825 page->mapping != NULL ||
826 page_count(page) != 1 ||
827 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
828 ~(1 << PG_locked |
829 1 << PG_referenced |
830 1 << PG_uptodate |
831 1 << PG_lru |
832 1 << PG_active |
833 1 << PG_reclaim))) {
834 printk(KERN_WARNING "fuse: trying to steal weird page\n");
835 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
836 return 1;
837 }
838 return 0;
839}
840
841static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
842{
843 int err;
844 struct page *oldpage = *pagep;
845 struct page *newpage;
846 struct pipe_buffer *buf = cs->pipebufs;
ce534fb0 847
0d8e84b0
MS
848 err = unlock_request(cs->fc, cs->req);
849 if (err)
850 return err;
851
ce534fb0
MS
852 fuse_copy_finish(cs);
853
854 err = buf->ops->confirm(cs->pipe, buf);
855 if (err)
856 return err;
857
858 BUG_ON(!cs->nr_segs);
859 cs->currbuf = buf;
860 cs->len = buf->len;
861 cs->pipebufs++;
862 cs->nr_segs--;
863
864 if (cs->len != PAGE_SIZE)
865 goto out_fallback;
866
867 if (buf->ops->steal(cs->pipe, buf) != 0)
868 goto out_fallback;
869
870 newpage = buf->page;
871
aa991b3b
MS
872 if (!PageUptodate(newpage))
873 SetPageUptodate(newpage);
ce534fb0
MS
874
875 ClearPageMappedToDisk(newpage);
876
877 if (fuse_check_page(newpage) != 0)
878 goto out_fallback_unlock;
879
ce534fb0
MS
880 /*
881 * This is a new and locked page; it shouldn't be mapped or
882 * have any special flags on it
883 */
884 if (WARN_ON(page_mapped(oldpage)))
885 goto out_fallback_unlock;
886 if (WARN_ON(page_has_private(oldpage)))
887 goto out_fallback_unlock;
888 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
889 goto out_fallback_unlock;
890 if (WARN_ON(PageMlocked(oldpage)))
891 goto out_fallback_unlock;
892
ef6a3c63 893 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
ce534fb0 894 if (err) {
ef6a3c63
MS
895 unlock_page(newpage);
896 return err;
ce534fb0 897 }
ef6a3c63 898
ce534fb0
MS
899 page_cache_get(newpage);
900
901 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
902 lru_cache_add_file(newpage);
903
904 err = 0;
905 spin_lock(&cs->fc->lock);
825d6d33 906 if (test_bit(FR_ABORTED, &cs->req->flags))
ce534fb0
MS
907 err = -ENOENT;
908 else
909 *pagep = newpage;
910 spin_unlock(&cs->fc->lock);
911
912 if (err) {
913 unlock_page(newpage);
914 page_cache_release(newpage);
915 return err;
916 }
917
918 unlock_page(oldpage);
919 page_cache_release(oldpage);
920 cs->len = 0;
921
922 return 0;
923
924out_fallback_unlock:
925 unlock_page(newpage);
926out_fallback:
c55a01d3
MS
927 cs->pg = buf->page;
928 cs->offset = buf->offset;
ce534fb0
MS
929
930 err = lock_request(cs->fc, cs->req);
931 if (err)
932 return err;
933
934 return 1;
935}
936
c3021629
MS
937static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
938 unsigned offset, unsigned count)
939{
940 struct pipe_buffer *buf;
0d8e84b0 941 int err;
c3021629
MS
942
943 if (cs->nr_segs == cs->pipe->buffers)
944 return -EIO;
945
0d8e84b0
MS
946 err = unlock_request(cs->fc, cs->req);
947 if (err)
948 return err;
949
c3021629
MS
950 fuse_copy_finish(cs);
951
952 buf = cs->pipebufs;
953 page_cache_get(page);
954 buf->page = page;
955 buf->offset = offset;
956 buf->len = count;
957
958 cs->pipebufs++;
959 cs->nr_segs++;
960 cs->len = 0;
961
962 return 0;
963}
964
334f485d
MS
965/*
966 * Copy a page in the request to/from the userspace buffer. Must be
967 * done atomically
968 */
ce534fb0 969static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
8bfc016d 970 unsigned offset, unsigned count, int zeroing)
334f485d 971{
ce534fb0
MS
972 int err;
973 struct page *page = *pagep;
974
b6777c40
MS
975 if (page && zeroing && count < PAGE_SIZE)
976 clear_highpage(page);
977
334f485d 978 while (count) {
c3021629
MS
979 if (cs->write && cs->pipebufs && page) {
980 return fuse_ref_page(cs, page, offset, count);
981 } else if (!cs->len) {
ce534fb0
MS
982 if (cs->move_pages && page &&
983 offset == 0 && count == PAGE_SIZE) {
984 err = fuse_try_move_page(cs, pagep);
985 if (err <= 0)
986 return err;
987 } else {
988 err = fuse_copy_fill(cs);
989 if (err)
990 return err;
991 }
1729a16c 992 }
334f485d 993 if (page) {
2408f6ef 994 void *mapaddr = kmap_atomic(page);
334f485d
MS
995 void *buf = mapaddr + offset;
996 offset += fuse_copy_do(cs, &buf, &count);
2408f6ef 997 kunmap_atomic(mapaddr);
334f485d
MS
998 } else
999 offset += fuse_copy_do(cs, NULL, &count);
1000 }
1001 if (page && !cs->write)
1002 flush_dcache_page(page);
1003 return 0;
1004}
1005
1006/* Copy pages in the request to/from userspace buffer */
1007static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1008 int zeroing)
1009{
1010 unsigned i;
1011 struct fuse_req *req = cs->req;
334f485d
MS
1012
1013 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
ce534fb0 1014 int err;
85f40aec
MP
1015 unsigned offset = req->page_descs[i].offset;
1016 unsigned count = min(nbytes, req->page_descs[i].length);
ce534fb0
MS
1017
1018 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1019 zeroing);
334f485d
MS
1020 if (err)
1021 return err;
1022
1023 nbytes -= count;
334f485d
MS
1024 }
1025 return 0;
1026}
1027
1028/* Copy a single argument in the request to/from userspace buffer */
1029static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1030{
1031 while (size) {
1729a16c
MS
1032 if (!cs->len) {
1033 int err = fuse_copy_fill(cs);
1034 if (err)
1035 return err;
1036 }
334f485d
MS
1037 fuse_copy_do(cs, &val, &size);
1038 }
1039 return 0;
1040}
1041
1042/* Copy request arguments to/from userspace buffer */
1043static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1044 unsigned argpages, struct fuse_arg *args,
1045 int zeroing)
1046{
1047 int err = 0;
1048 unsigned i;
1049
1050 for (i = 0; !err && i < numargs; i++) {
1051 struct fuse_arg *arg = &args[i];
1052 if (i == numargs - 1 && argpages)
1053 err = fuse_copy_pages(cs, arg->size, zeroing);
1054 else
1055 err = fuse_copy_one(cs, arg->value, arg->size);
1056 }
1057 return err;
1058}
1059
07e77dca
MS
1060static int forget_pending(struct fuse_conn *fc)
1061{
1062 return fc->forget_list_head.next != NULL;
1063}
1064
a4d27e75
MS
1065static int request_pending(struct fuse_conn *fc)
1066{
07e77dca
MS
1067 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
1068 forget_pending(fc);
a4d27e75
MS
1069}
1070
334f485d
MS
1071/* Wait until a request is available on the pending list */
1072static void request_wait(struct fuse_conn *fc)
b9ca67b2
MS
1073__releases(fc->lock)
1074__acquires(fc->lock)
334f485d
MS
1075{
1076 DECLARE_WAITQUEUE(wait, current);
1077
1078 add_wait_queue_exclusive(&fc->waitq, &wait);
a4d27e75 1079 while (fc->connected && !request_pending(fc)) {
334f485d
MS
1080 set_current_state(TASK_INTERRUPTIBLE);
1081 if (signal_pending(current))
1082 break;
1083
d7133114 1084 spin_unlock(&fc->lock);
334f485d 1085 schedule();
d7133114 1086 spin_lock(&fc->lock);
334f485d
MS
1087 }
1088 set_current_state(TASK_RUNNING);
1089 remove_wait_queue(&fc->waitq, &wait);
1090}
1091
a4d27e75
MS
1092/*
1093 * Transfer an interrupt request to userspace
1094 *
1095 * Unlike other requests this is assembled on demand, without a need
1096 * to allocate a separate fuse_req structure.
1097 *
1098 * Called with fc->lock held, releases it
1099 */
c3021629
MS
1100static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
1101 size_t nbytes, struct fuse_req *req)
b9ca67b2 1102__releases(fc->lock)
a4d27e75 1103{
a4d27e75
MS
1104 struct fuse_in_header ih;
1105 struct fuse_interrupt_in arg;
1106 unsigned reqsize = sizeof(ih) + sizeof(arg);
1107 int err;
1108
1109 list_del_init(&req->intr_entry);
1110 req->intr_unique = fuse_get_unique(fc);
1111 memset(&ih, 0, sizeof(ih));
1112 memset(&arg, 0, sizeof(arg));
1113 ih.len = reqsize;
1114 ih.opcode = FUSE_INTERRUPT;
1115 ih.unique = req->intr_unique;
1116 arg.unique = req->in.h.unique;
1117
1118 spin_unlock(&fc->lock);
c3021629 1119 if (nbytes < reqsize)
a4d27e75
MS
1120 return -EINVAL;
1121
c3021629 1122 err = fuse_copy_one(cs, &ih, sizeof(ih));
a4d27e75 1123 if (!err)
c3021629
MS
1124 err = fuse_copy_one(cs, &arg, sizeof(arg));
1125 fuse_copy_finish(cs);
a4d27e75
MS
1126
1127 return err ? err : reqsize;
1128}
1129
02c048b9
MS
1130static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
1131 unsigned max,
1132 unsigned *countp)
07e77dca 1133{
02c048b9
MS
1134 struct fuse_forget_link *head = fc->forget_list_head.next;
1135 struct fuse_forget_link **newhead = &head;
1136 unsigned count;
07e77dca 1137
02c048b9
MS
1138 for (count = 0; *newhead != NULL && count < max; count++)
1139 newhead = &(*newhead)->next;
1140
1141 fc->forget_list_head.next = *newhead;
1142 *newhead = NULL;
07e77dca
MS
1143 if (fc->forget_list_head.next == NULL)
1144 fc->forget_list_tail = &fc->forget_list_head;
1145
02c048b9
MS
1146 if (countp != NULL)
1147 *countp = count;
1148
1149 return head;
07e77dca
MS
1150}
1151
1152static int fuse_read_single_forget(struct fuse_conn *fc,
1153 struct fuse_copy_state *cs,
1154 size_t nbytes)
1155__releases(fc->lock)
1156{
1157 int err;
02c048b9 1158 struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
07e77dca 1159 struct fuse_forget_in arg = {
02c048b9 1160 .nlookup = forget->forget_one.nlookup,
07e77dca
MS
1161 };
1162 struct fuse_in_header ih = {
1163 .opcode = FUSE_FORGET,
02c048b9 1164 .nodeid = forget->forget_one.nodeid,
07e77dca
MS
1165 .unique = fuse_get_unique(fc),
1166 .len = sizeof(ih) + sizeof(arg),
1167 };
1168
1169 spin_unlock(&fc->lock);
1170 kfree(forget);
1171 if (nbytes < ih.len)
1172 return -EINVAL;
1173
1174 err = fuse_copy_one(cs, &ih, sizeof(ih));
1175 if (!err)
1176 err = fuse_copy_one(cs, &arg, sizeof(arg));
1177 fuse_copy_finish(cs);
1178
1179 if (err)
1180 return err;
1181
1182 return ih.len;
1183}
1184
02c048b9
MS
1185static int fuse_read_batch_forget(struct fuse_conn *fc,
1186 struct fuse_copy_state *cs, size_t nbytes)
1187__releases(fc->lock)
1188{
1189 int err;
1190 unsigned max_forgets;
1191 unsigned count;
1192 struct fuse_forget_link *head;
1193 struct fuse_batch_forget_in arg = { .count = 0 };
1194 struct fuse_in_header ih = {
1195 .opcode = FUSE_BATCH_FORGET,
1196 .unique = fuse_get_unique(fc),
1197 .len = sizeof(ih) + sizeof(arg),
1198 };
1199
1200 if (nbytes < ih.len) {
1201 spin_unlock(&fc->lock);
1202 return -EINVAL;
1203 }
1204
1205 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1206 head = dequeue_forget(fc, max_forgets, &count);
1207 spin_unlock(&fc->lock);
1208
1209 arg.count = count;
1210 ih.len += count * sizeof(struct fuse_forget_one);
1211 err = fuse_copy_one(cs, &ih, sizeof(ih));
1212 if (!err)
1213 err = fuse_copy_one(cs, &arg, sizeof(arg));
1214
1215 while (head) {
1216 struct fuse_forget_link *forget = head;
1217
1218 if (!err) {
1219 err = fuse_copy_one(cs, &forget->forget_one,
1220 sizeof(forget->forget_one));
1221 }
1222 head = forget->next;
1223 kfree(forget);
1224 }
1225
1226 fuse_copy_finish(cs);
1227
1228 if (err)
1229 return err;
1230
1231 return ih.len;
1232}
1233
1234static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1235 size_t nbytes)
1236__releases(fc->lock)
1237{
1238 if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1239 return fuse_read_single_forget(fc, cs, nbytes);
1240 else
1241 return fuse_read_batch_forget(fc, cs, nbytes);
1242}
1243
334f485d
MS
1244/*
1245 * Read a single request into the userspace filesystem's buffer. This
1246 * function waits until a request is available, then removes it from
1247 * the pending list and copies request data to userspace buffer. If
f9a2842e
MS
1248 * no reply is needed (FORGET) or request has been aborted or there
1249 * was an error during the copying, then it's finished by calling
334f485d
MS
1250 * request_end(). Otherwise add it to the processing list, and set
1251 * the 'sent' flag.
1252 */
c3021629
MS
1253static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1254 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1255{
1256 int err;
334f485d
MS
1257 struct fuse_req *req;
1258 struct fuse_in *in;
334f485d
MS
1259 unsigned reqsize;
1260
1d3d752b 1261 restart:
d7133114 1262 spin_lock(&fc->lock);
e5ac1d1e
JD
1263 err = -EAGAIN;
1264 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
a4d27e75 1265 !request_pending(fc))
e5ac1d1e
JD
1266 goto err_unlock;
1267
334f485d
MS
1268 request_wait(fc);
1269 err = -ENODEV;
9ba7cbba 1270 if (!fc->connected)
334f485d
MS
1271 goto err_unlock;
1272 err = -ERESTARTSYS;
a4d27e75 1273 if (!request_pending(fc))
334f485d
MS
1274 goto err_unlock;
1275
a4d27e75
MS
1276 if (!list_empty(&fc->interrupts)) {
1277 req = list_entry(fc->interrupts.next, struct fuse_req,
1278 intr_entry);
c3021629 1279 return fuse_read_interrupt(fc, cs, nbytes, req);
a4d27e75
MS
1280 }
1281
07e77dca
MS
1282 if (forget_pending(fc)) {
1283 if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
02c048b9 1284 return fuse_read_forget(fc, cs, nbytes);
07e77dca
MS
1285
1286 if (fc->forget_batch <= -8)
1287 fc->forget_batch = 16;
1288 }
1289
334f485d 1290 req = list_entry(fc->pending.next, struct fuse_req, list);
83cfd493 1291 req->state = FUSE_REQ_READING;
d77a1d5b 1292 list_move(&req->list, &fc->io);
334f485d
MS
1293
1294 in = &req->in;
1d3d752b
MS
1295 reqsize = in->h.len;
1296 /* If request is too large, reply with an error and restart the read */
c3021629 1297 if (nbytes < reqsize) {
1d3d752b
MS
1298 req->out.h.error = -EIO;
1299 /* SETXATTR is special, since it may contain too large data */
1300 if (in->h.opcode == FUSE_SETXATTR)
1301 req->out.h.error = -E2BIG;
1302 request_end(fc, req);
1303 goto restart;
334f485d 1304 }
d7133114 1305 spin_unlock(&fc->lock);
c3021629
MS
1306 cs->req = req;
1307 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1d3d752b 1308 if (!err)
c3021629 1309 err = fuse_copy_args(cs, in->numargs, in->argpages,
1d3d752b 1310 (struct fuse_arg *) in->args, 0);
c3021629 1311 fuse_copy_finish(cs);
d7133114 1312 spin_lock(&fc->lock);
825d6d33 1313 clear_bit(FR_LOCKED, &req->flags);
0d8e84b0 1314 if (!fc->connected) {
c9c9d7df
MS
1315 request_end(fc, req);
1316 return -ENODEV;
1317 }
334f485d 1318 if (err) {
c9c9d7df 1319 req->out.h.error = -EIO;
334f485d
MS
1320 request_end(fc, req);
1321 return err;
1322 }
825d6d33 1323 if (!test_bit(FR_ISREPLY, &req->flags)) {
334f485d 1324 request_end(fc, req);
825d6d33 1325 } else {
83cfd493 1326 req->state = FUSE_REQ_SENT;
d77a1d5b 1327 list_move_tail(&req->list, &fc->processing);
825d6d33 1328 if (test_bit(FR_INTERRUPTED, &req->flags))
a4d27e75 1329 queue_interrupt(fc, req);
d7133114 1330 spin_unlock(&fc->lock);
334f485d
MS
1331 }
1332 return reqsize;
1333
1334 err_unlock:
d7133114 1335 spin_unlock(&fc->lock);
334f485d
MS
1336 return err;
1337}
1338
94e4fe2c
TVB
1339static int fuse_dev_open(struct inode *inode, struct file *file)
1340{
1341 /*
1342 * The fuse device's file's private_data is used to hold
1343 * the fuse_conn(ection) when it is mounted, and is used to
1344 * keep track of whether the file has been mounted already.
1345 */
1346 file->private_data = NULL;
1347 return 0;
1348}
1349
fbdbacca 1350static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
c3021629
MS
1351{
1352 struct fuse_copy_state cs;
1353 struct file *file = iocb->ki_filp;
1354 struct fuse_conn *fc = fuse_get_conn(file);
1355 if (!fc)
1356 return -EPERM;
1357
fbdbacca
AV
1358 if (!iter_is_iovec(to))
1359 return -EINVAL;
1360
6c09e94a 1361 fuse_copy_init(&cs, fc, 1, to);
c3021629 1362
fbdbacca 1363 return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
c3021629
MS
1364}
1365
c3021629
MS
1366static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1367 struct pipe_inode_info *pipe,
1368 size_t len, unsigned int flags)
1369{
1370 int ret;
1371 int page_nr = 0;
1372 int do_wakeup = 0;
1373 struct pipe_buffer *bufs;
1374 struct fuse_copy_state cs;
1375 struct fuse_conn *fc = fuse_get_conn(in);
1376 if (!fc)
1377 return -EPERM;
1378
07e77dca 1379 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
c3021629
MS
1380 if (!bufs)
1381 return -ENOMEM;
1382
6c09e94a 1383 fuse_copy_init(&cs, fc, 1, NULL);
c3021629
MS
1384 cs.pipebufs = bufs;
1385 cs.pipe = pipe;
1386 ret = fuse_dev_do_read(fc, in, &cs, len);
1387 if (ret < 0)
1388 goto out;
1389
1390 ret = 0;
1391 pipe_lock(pipe);
1392
1393 if (!pipe->readers) {
1394 send_sig(SIGPIPE, current, 0);
1395 if (!ret)
1396 ret = -EPIPE;
1397 goto out_unlock;
1398 }
1399
1400 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1401 ret = -EIO;
1402 goto out_unlock;
1403 }
1404
1405 while (page_nr < cs.nr_segs) {
1406 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1407 struct pipe_buffer *buf = pipe->bufs + newbuf;
1408
1409 buf->page = bufs[page_nr].page;
1410 buf->offset = bufs[page_nr].offset;
1411 buf->len = bufs[page_nr].len;
28a625cb
MS
1412 /*
1413 * Need to be careful about this. Having buf->ops in module
1414 * code can Oops if the buffer persists after module unload.
1415 */
1416 buf->ops = &nosteal_pipe_buf_ops;
c3021629
MS
1417
1418 pipe->nrbufs++;
1419 page_nr++;
1420 ret += buf->len;
1421
6447a3cf 1422 if (pipe->files)
c3021629
MS
1423 do_wakeup = 1;
1424 }
1425
1426out_unlock:
1427 pipe_unlock(pipe);
1428
1429 if (do_wakeup) {
1430 smp_mb();
1431 if (waitqueue_active(&pipe->wait))
1432 wake_up_interruptible(&pipe->wait);
1433 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1434 }
1435
1436out:
1437 for (; page_nr < cs.nr_segs; page_nr++)
1438 page_cache_release(bufs[page_nr].page);
1439
1440 kfree(bufs);
1441 return ret;
1442}
1443
95668a69
TH
1444static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1445 struct fuse_copy_state *cs)
1446{
1447 struct fuse_notify_poll_wakeup_out outarg;
f6d47a17 1448 int err = -EINVAL;
95668a69
TH
1449
1450 if (size != sizeof(outarg))
f6d47a17 1451 goto err;
95668a69
TH
1452
1453 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1454 if (err)
f6d47a17 1455 goto err;
95668a69 1456
f6d47a17 1457 fuse_copy_finish(cs);
95668a69 1458 return fuse_notify_poll_wakeup(fc, &outarg);
f6d47a17
MS
1459
1460err:
1461 fuse_copy_finish(cs);
1462 return err;
95668a69
TH
1463}
1464
3b463ae0
JM
1465static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1466 struct fuse_copy_state *cs)
1467{
1468 struct fuse_notify_inval_inode_out outarg;
1469 int err = -EINVAL;
1470
1471 if (size != sizeof(outarg))
1472 goto err;
1473
1474 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1475 if (err)
1476 goto err;
1477 fuse_copy_finish(cs);
1478
1479 down_read(&fc->killsb);
1480 err = -ENOENT;
b21dda43
MS
1481 if (fc->sb) {
1482 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1483 outarg.off, outarg.len);
1484 }
3b463ae0
JM
1485 up_read(&fc->killsb);
1486 return err;
1487
1488err:
1489 fuse_copy_finish(cs);
1490 return err;
1491}
1492
1493static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1494 struct fuse_copy_state *cs)
1495{
1496 struct fuse_notify_inval_entry_out outarg;
b2d82ee3
FW
1497 int err = -ENOMEM;
1498 char *buf;
3b463ae0
JM
1499 struct qstr name;
1500
b2d82ee3
FW
1501 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1502 if (!buf)
1503 goto err;
1504
1505 err = -EINVAL;
3b463ae0
JM
1506 if (size < sizeof(outarg))
1507 goto err;
1508
1509 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1510 if (err)
1511 goto err;
1512
1513 err = -ENAMETOOLONG;
1514 if (outarg.namelen > FUSE_NAME_MAX)
1515 goto err;
1516
c2183d1e
MS
1517 err = -EINVAL;
1518 if (size != sizeof(outarg) + outarg.namelen + 1)
1519 goto err;
1520
3b463ae0
JM
1521 name.name = buf;
1522 name.len = outarg.namelen;
1523 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1524 if (err)
1525 goto err;
1526 fuse_copy_finish(cs);
1527 buf[outarg.namelen] = 0;
1528 name.hash = full_name_hash(name.name, name.len);
1529
1530 down_read(&fc->killsb);
1531 err = -ENOENT;
b21dda43 1532 if (fc->sb)
451d0f59
JM
1533 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1534 up_read(&fc->killsb);
1535 kfree(buf);
1536 return err;
1537
1538err:
1539 kfree(buf);
1540 fuse_copy_finish(cs);
1541 return err;
1542}
1543
1544static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1545 struct fuse_copy_state *cs)
1546{
1547 struct fuse_notify_delete_out outarg;
1548 int err = -ENOMEM;
1549 char *buf;
1550 struct qstr name;
1551
1552 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1553 if (!buf)
1554 goto err;
1555
1556 err = -EINVAL;
1557 if (size < sizeof(outarg))
1558 goto err;
1559
1560 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1561 if (err)
1562 goto err;
1563
1564 err = -ENAMETOOLONG;
1565 if (outarg.namelen > FUSE_NAME_MAX)
1566 goto err;
1567
1568 err = -EINVAL;
1569 if (size != sizeof(outarg) + outarg.namelen + 1)
1570 goto err;
1571
1572 name.name = buf;
1573 name.len = outarg.namelen;
1574 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1575 if (err)
1576 goto err;
1577 fuse_copy_finish(cs);
1578 buf[outarg.namelen] = 0;
1579 name.hash = full_name_hash(name.name, name.len);
1580
1581 down_read(&fc->killsb);
1582 err = -ENOENT;
1583 if (fc->sb)
1584 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1585 outarg.child, &name);
3b463ae0 1586 up_read(&fc->killsb);
b2d82ee3 1587 kfree(buf);
3b463ae0
JM
1588 return err;
1589
1590err:
b2d82ee3 1591 kfree(buf);
3b463ae0
JM
1592 fuse_copy_finish(cs);
1593 return err;
1594}
1595
a1d75f25
MS
1596static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1597 struct fuse_copy_state *cs)
1598{
1599 struct fuse_notify_store_out outarg;
1600 struct inode *inode;
1601 struct address_space *mapping;
1602 u64 nodeid;
1603 int err;
1604 pgoff_t index;
1605 unsigned int offset;
1606 unsigned int num;
1607 loff_t file_size;
1608 loff_t end;
1609
1610 err = -EINVAL;
1611 if (size < sizeof(outarg))
1612 goto out_finish;
1613
1614 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1615 if (err)
1616 goto out_finish;
1617
1618 err = -EINVAL;
1619 if (size - sizeof(outarg) != outarg.size)
1620 goto out_finish;
1621
1622 nodeid = outarg.nodeid;
1623
1624 down_read(&fc->killsb);
1625
1626 err = -ENOENT;
1627 if (!fc->sb)
1628 goto out_up_killsb;
1629
1630 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1631 if (!inode)
1632 goto out_up_killsb;
1633
1634 mapping = inode->i_mapping;
1635 index = outarg.offset >> PAGE_CACHE_SHIFT;
1636 offset = outarg.offset & ~PAGE_CACHE_MASK;
1637 file_size = i_size_read(inode);
1638 end = outarg.offset + outarg.size;
1639 if (end > file_size) {
1640 file_size = end;
1641 fuse_write_update_size(inode, file_size);
1642 }
1643
1644 num = outarg.size;
1645 while (num) {
1646 struct page *page;
1647 unsigned int this_num;
1648
1649 err = -ENOMEM;
1650 page = find_or_create_page(mapping, index,
1651 mapping_gfp_mask(mapping));
1652 if (!page)
1653 goto out_iput;
1654
1655 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1656 err = fuse_copy_page(cs, &page, offset, this_num, 0);
063ec1e5
MS
1657 if (!err && offset == 0 &&
1658 (this_num == PAGE_CACHE_SIZE || file_size == end))
a1d75f25
MS
1659 SetPageUptodate(page);
1660 unlock_page(page);
1661 page_cache_release(page);
1662
1663 if (err)
1664 goto out_iput;
1665
1666 num -= this_num;
1667 offset = 0;
1668 index++;
1669 }
1670
1671 err = 0;
1672
1673out_iput:
1674 iput(inode);
1675out_up_killsb:
1676 up_read(&fc->killsb);
1677out_finish:
1678 fuse_copy_finish(cs);
1679 return err;
1680}
1681
2d45ba38
MS
1682static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1683{
b745bc85 1684 release_pages(req->pages, req->num_pages, false);
2d45ba38
MS
1685}
1686
1687static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1688 struct fuse_notify_retrieve_out *outarg)
1689{
1690 int err;
1691 struct address_space *mapping = inode->i_mapping;
1692 struct fuse_req *req;
1693 pgoff_t index;
1694 loff_t file_size;
1695 unsigned int num;
1696 unsigned int offset;
0157443c 1697 size_t total_len = 0;
4d53dc99 1698 int num_pages;
2d45ba38 1699
4d53dc99
MP
1700 offset = outarg->offset & ~PAGE_CACHE_MASK;
1701 file_size = i_size_read(inode);
1702
1703 num = outarg->size;
1704 if (outarg->offset > file_size)
1705 num = 0;
1706 else if (outarg->offset + num > file_size)
1707 num = file_size - outarg->offset;
1708
1709 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1710 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1711
1712 req = fuse_get_req(fc, num_pages);
2d45ba38
MS
1713 if (IS_ERR(req))
1714 return PTR_ERR(req);
1715
2d45ba38
MS
1716 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1717 req->in.h.nodeid = outarg->nodeid;
1718 req->in.numargs = 2;
1719 req->in.argpages = 1;
b2430d75 1720 req->page_descs[0].offset = offset;
2d45ba38
MS
1721 req->end = fuse_retrieve_end;
1722
1723 index = outarg->offset >> PAGE_CACHE_SHIFT;
2d45ba38 1724
4d53dc99 1725 while (num && req->num_pages < num_pages) {
2d45ba38
MS
1726 struct page *page;
1727 unsigned int this_num;
1728
1729 page = find_get_page(mapping, index);
1730 if (!page)
1731 break;
1732
1733 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1734 req->pages[req->num_pages] = page;
85f40aec 1735 req->page_descs[req->num_pages].length = this_num;
2d45ba38
MS
1736 req->num_pages++;
1737
c9e67d48 1738 offset = 0;
2d45ba38
MS
1739 num -= this_num;
1740 total_len += this_num;
48706d0a 1741 index++;
2d45ba38
MS
1742 }
1743 req->misc.retrieve_in.offset = outarg->offset;
1744 req->misc.retrieve_in.size = total_len;
1745 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1746 req->in.args[0].value = &req->misc.retrieve_in;
1747 req->in.args[1].size = total_len;
1748
1749 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1750 if (err)
1751 fuse_retrieve_end(fc, req);
1752
1753 return err;
1754}
1755
1756static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1757 struct fuse_copy_state *cs)
1758{
1759 struct fuse_notify_retrieve_out outarg;
1760 struct inode *inode;
1761 int err;
1762
1763 err = -EINVAL;
1764 if (size != sizeof(outarg))
1765 goto copy_finish;
1766
1767 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1768 if (err)
1769 goto copy_finish;
1770
1771 fuse_copy_finish(cs);
1772
1773 down_read(&fc->killsb);
1774 err = -ENOENT;
1775 if (fc->sb) {
1776 u64 nodeid = outarg.nodeid;
1777
1778 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1779 if (inode) {
1780 err = fuse_retrieve(fc, inode, &outarg);
1781 iput(inode);
1782 }
1783 }
1784 up_read(&fc->killsb);
1785
1786 return err;
1787
1788copy_finish:
1789 fuse_copy_finish(cs);
1790 return err;
1791}
1792
8599396b
TH
1793static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1794 unsigned int size, struct fuse_copy_state *cs)
1795{
0d278362
MS
1796 /* Don't try to move pages (yet) */
1797 cs->move_pages = 0;
1798
8599396b 1799 switch (code) {
95668a69
TH
1800 case FUSE_NOTIFY_POLL:
1801 return fuse_notify_poll(fc, size, cs);
1802
3b463ae0
JM
1803 case FUSE_NOTIFY_INVAL_INODE:
1804 return fuse_notify_inval_inode(fc, size, cs);
1805
1806 case FUSE_NOTIFY_INVAL_ENTRY:
1807 return fuse_notify_inval_entry(fc, size, cs);
1808
a1d75f25
MS
1809 case FUSE_NOTIFY_STORE:
1810 return fuse_notify_store(fc, size, cs);
1811
2d45ba38
MS
1812 case FUSE_NOTIFY_RETRIEVE:
1813 return fuse_notify_retrieve(fc, size, cs);
1814
451d0f59
JM
1815 case FUSE_NOTIFY_DELETE:
1816 return fuse_notify_delete(fc, size, cs);
1817
8599396b 1818 default:
f6d47a17 1819 fuse_copy_finish(cs);
8599396b
TH
1820 return -EINVAL;
1821 }
1822}
1823
334f485d
MS
1824/* Look up request on processing list by unique ID */
1825static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1826{
05726aca 1827 struct fuse_req *req;
334f485d 1828
05726aca 1829 list_for_each_entry(req, &fc->processing, list) {
a4d27e75 1830 if (req->in.h.unique == unique || req->intr_unique == unique)
334f485d
MS
1831 return req;
1832 }
1833 return NULL;
1834}
1835
1836static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1837 unsigned nbytes)
1838{
1839 unsigned reqsize = sizeof(struct fuse_out_header);
1840
1841 if (out->h.error)
1842 return nbytes != reqsize ? -EINVAL : 0;
1843
1844 reqsize += len_args(out->numargs, out->args);
1845
1846 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1847 return -EINVAL;
1848 else if (reqsize > nbytes) {
1849 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1850 unsigned diffsize = reqsize - nbytes;
1851 if (diffsize > lastarg->size)
1852 return -EINVAL;
1853 lastarg->size -= diffsize;
1854 }
1855 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1856 out->page_zeroing);
1857}
1858
1859/*
1860 * Write a single reply to a request. First the header is copied from
1861 * the write buffer. The request is then searched on the processing
1862 * list by the unique ID found in the header. If found, then remove
1863 * it from the list and copy the rest of the buffer to the request.
1864 * The request is finished by calling request_end()
1865 */
dd3bb14f
MS
1866static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1867 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1868{
1869 int err;
334f485d
MS
1870 struct fuse_req *req;
1871 struct fuse_out_header oh;
334f485d 1872
334f485d
MS
1873 if (nbytes < sizeof(struct fuse_out_header))
1874 return -EINVAL;
1875
dd3bb14f 1876 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1877 if (err)
1878 goto err_finish;
8599396b
TH
1879
1880 err = -EINVAL;
1881 if (oh.len != nbytes)
1882 goto err_finish;
1883
1884 /*
1885 * Zero oh.unique indicates unsolicited notification message
1886 * and error contains notification code.
1887 */
1888 if (!oh.unique) {
dd3bb14f 1889 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1890 return err ? err : nbytes;
1891 }
1892
334f485d 1893 err = -EINVAL;
8599396b 1894 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1895 goto err_finish;
1896
d7133114 1897 spin_lock(&fc->lock);
69a53bf2
MS
1898 err = -ENOENT;
1899 if (!fc->connected)
1900 goto err_unlock;
1901
334f485d 1902 req = request_find(fc, oh.unique);
334f485d
MS
1903 if (!req)
1904 goto err_unlock;
1905
a4d27e75
MS
1906 /* Is it an interrupt reply? */
1907 if (req->intr_unique == oh.unique) {
1908 err = -EINVAL;
1909 if (nbytes != sizeof(struct fuse_out_header))
1910 goto err_unlock;
1911
1912 if (oh.error == -ENOSYS)
1913 fc->no_interrupt = 1;
1914 else if (oh.error == -EAGAIN)
1915 queue_interrupt(fc, req);
1916
1917 spin_unlock(&fc->lock);
dd3bb14f 1918 fuse_copy_finish(cs);
a4d27e75
MS
1919 return nbytes;
1920 }
1921
1922 req->state = FUSE_REQ_WRITING;
d77a1d5b 1923 list_move(&req->list, &fc->io);
334f485d 1924 req->out.h = oh;
825d6d33 1925 set_bit(FR_LOCKED, &req->flags);
dd3bb14f 1926 cs->req = req;
ce534fb0
MS
1927 if (!req->out.page_replace)
1928 cs->move_pages = 0;
d7133114 1929 spin_unlock(&fc->lock);
334f485d 1930
dd3bb14f
MS
1931 err = copy_out_args(cs, &req->out, nbytes);
1932 fuse_copy_finish(cs);
334f485d 1933
d7133114 1934 spin_lock(&fc->lock);
825d6d33 1935 clear_bit(FR_LOCKED, &req->flags);
0d8e84b0
MS
1936 if (!fc->connected)
1937 err = -ENOENT;
1938 else if (err)
334f485d
MS
1939 req->out.h.error = -EIO;
1940 request_end(fc, req);
1941
1942 return err ? err : nbytes;
1943
1944 err_unlock:
d7133114 1945 spin_unlock(&fc->lock);
334f485d 1946 err_finish:
dd3bb14f 1947 fuse_copy_finish(cs);
334f485d
MS
1948 return err;
1949}
1950
fbdbacca 1951static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1952{
1953 struct fuse_copy_state cs;
1954 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1955 if (!fc)
1956 return -EPERM;
1957
fbdbacca
AV
1958 if (!iter_is_iovec(from))
1959 return -EINVAL;
1960
6c09e94a 1961 fuse_copy_init(&cs, fc, 0, from);
dd3bb14f 1962
fbdbacca 1963 return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
dd3bb14f
MS
1964}
1965
1966static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1967 struct file *out, loff_t *ppos,
1968 size_t len, unsigned int flags)
1969{
1970 unsigned nbuf;
1971 unsigned idx;
1972 struct pipe_buffer *bufs;
1973 struct fuse_copy_state cs;
1974 struct fuse_conn *fc;
1975 size_t rem;
1976 ssize_t ret;
1977
1978 fc = fuse_get_conn(out);
1979 if (!fc)
1980 return -EPERM;
1981
07e77dca 1982 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1983 if (!bufs)
1984 return -ENOMEM;
1985
1986 pipe_lock(pipe);
1987 nbuf = 0;
1988 rem = 0;
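	/*
	 * First check that the pipe really holds at least len bytes;
	 * if it does not, fail with -EINVAL below without detaching
	 * any buffers.
	 */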
1989 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1990 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1991
1992 ret = -EINVAL;
1993 if (rem < len) {
1994 pipe_unlock(pipe);
1995 goto out;
1996 }
1997
1998 rem = len;
1999 while (rem) {
2000 struct pipe_buffer *ibuf;
2001 struct pipe_buffer *obuf;
2002
2003 BUG_ON(nbuf >= pipe->buffers);
2004 BUG_ON(!pipe->nrbufs);
2005 ibuf = &pipe->bufs[pipe->curbuf];
2006 obuf = &bufs[nbuf];
2007
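		/*
		 * If the rest of the reply covers this whole input buffer,
		 * steal it outright (clear ibuf->ops and advance curbuf);
		 * otherwise take an extra reference and hand over only the
		 * first rem bytes, leaving the tail in the pipe.
		 */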
2008 if (rem >= ibuf->len) {
2009 *obuf = *ibuf;
2010 ibuf->ops = NULL;
2011 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2012 pipe->nrbufs--;
2013 } else {
2014 ibuf->ops->get(pipe, ibuf);
2015 *obuf = *ibuf;
2016 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2017 obuf->len = rem;
2018 ibuf->offset += obuf->len;
2019 ibuf->len -= obuf->len;
2020 }
2021 nbuf++;
2022 rem -= obuf->len;
2023 }
2024 pipe_unlock(pipe);
2025
6c09e94a 2026 fuse_copy_init(&cs, fc, 0, NULL);
dd3bb14f 2027 cs.pipebufs = bufs;
6c09e94a 2028 cs.nr_segs = nbuf;
dd3bb14f
MS
2029 cs.pipe = pipe;
2030
ce534fb0
MS
2031 if (flags & SPLICE_F_MOVE)
2032 cs.move_pages = 1;
2033
dd3bb14f
MS
2034 ret = fuse_dev_do_write(fc, &cs, len);
2035
2036 for (idx = 0; idx < nbuf; idx++) {
2037 struct pipe_buffer *buf = &bufs[idx];
2038 buf->ops->release(pipe, buf);
2039 }
2040out:
2041 kfree(bufs);
2042 return ret;
2043}
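/*
 * Illustrative only, not part of dev.c: roughly how a userspace daemon
 * could feed a reply through this splice path.  The fuse_out_header and
 * payload are first written (or vmspliced) into a pipe, then the pipe is
 * spliced into the fuse device; SPLICE_F_MOVE asks for the pages to be
 * moved rather than copied, matching cs.move_pages above.  The helper
 * name and descriptors are assumptions made for the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// 'total' is the fuse_out_header.len already queued in the pipe
 *	static ssize_t splice_reply(int fuse_fd, int pipe_rd, size_t total)
 *	{
 *		return splice(pipe_rd, NULL, fuse_fd, NULL, total,
 *			      SPLICE_F_MOVE);
 *	}
 */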
2044
334f485d
MS
2045static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2046{
334f485d 2047 unsigned mask = POLLOUT | POLLWRNORM;
7025d9ad 2048 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2049 if (!fc)
7025d9ad 2050 return POLLERR;
334f485d
MS
2051
2052 poll_wait(file, &fc->waitq, wait);
2053
d7133114 2054 spin_lock(&fc->lock);
7025d9ad
MS
2055 if (!fc->connected)
2056 mask = POLLERR;
a4d27e75 2057 else if (request_pending(fc))
7025d9ad 2058 mask |= POLLIN | POLLRDNORM;
d7133114 2059 spin_unlock(&fc->lock);
334f485d
MS
2060
2061 return mask;
2062}
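/*
 * Illustrative only, not part of dev.c: the daemon's side of this poll
 * implementation.  POLLIN means work (a request, interrupt or forget) is
 * queued for the daemon; POLLERR means the connection is gone.  The
 * helper name and loop structure are assumptions made for the example.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_request(int fuse_fd)
 *	{
 *		struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *		if (pfd.revents & POLLERR)
 *			return -1;	// connection aborted or released
 *		return 0;		// a read of the device should now find a request
 *	}
 */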
2063
69a53bf2
MS
2064/*
2065 * Abort all requests on the given list (pending or processing)
2066 *
d7133114 2067 * This function releases and reacquires fc->lock
69a53bf2 2068 */
334f485d 2069static void end_requests(struct fuse_conn *fc, struct list_head *head)
b9ca67b2
MS
2070__releases(fc->lock)
2071__acquires(fc->lock)
334f485d
MS
2072{
2073 while (!list_empty(head)) {
2074 struct fuse_req *req;
2075 req = list_entry(head->next, struct fuse_req, list);
334f485d
MS
2076 req->out.h.error = -ECONNABORTED;
2077 request_end(fc, req);
d7133114 2078 spin_lock(&fc->lock);
334f485d
MS
2079 }
2080}
2081
69a53bf2
MS
2082/*
2083 * Abort requests under I/O
2084 *
0d8e84b0
MS
2085 * Separate out unlocked requests; they should be finished off immediately.
2086 * Locked requests will be finished after unlock; see unlock_request().
64c6d8ed 2087 *
0d8e84b0
MS
2088 * Next, finish off the unlocked requests. It is possible that some request
2089 * will finish before we get to it. This is OK: in that case the request will
2090 * have been removed from the list before we touch it.
69a53bf2
MS
2091 */
2092static void end_io_requests(struct fuse_conn *fc)
b9ca67b2
MS
2093__releases(fc->lock)
2094__acquires(fc->lock)
69a53bf2 2095{
0d8e84b0
MS
2096 struct fuse_req *req, *next;
2097 LIST_HEAD(to_end);
64c6d8ed 2098
0d8e84b0 2099 list_for_each_entry_safe(req, next, &fc->io, list) {
69a53bf2 2100 req->out.h.error = -ECONNABORTED;
825d6d33
MS
2101 set_bit(FR_ABORTED, &req->flags);
2102 if (!test_bit(FR_LOCKED, &req->flags))
0d8e84b0
MS
2103 list_move(&req->list, &to_end);
2104 }
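	/*
	 * request_end() is called with fc->lock held and drops it, which
	 * is why the lock has to be re-taken on every iteration below.
	 */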
2105 while (!list_empty(&to_end)) {
2106 req = list_first_entry(&to_end, struct fuse_req, list);
2107 __fuse_get_request(req);
2108 request_end(fc, req);
2109 spin_lock(&fc->lock);
69a53bf2
MS
2110 }
2111}
2112
595afaf9 2113static void end_queued_requests(struct fuse_conn *fc)
b9ca67b2
MS
2114__releases(fc->lock)
2115__acquires(fc->lock)
595afaf9
MS
2116{
2117 fc->max_background = UINT_MAX;
2118 flush_bg_queue(fc);
2119 end_requests(fc, &fc->pending);
2120 end_requests(fc, &fc->processing);
07e77dca 2121 while (forget_pending(fc))
02c048b9 2122 kfree(dequeue_forget(fc, 1, NULL));
595afaf9
MS
2123}
2124
357ccf2b
BG
2125static void end_polls(struct fuse_conn *fc)
2126{
2127 struct rb_node *p;
2128
2129 p = rb_first(&fc->polled_files);
2130
2131 while (p) {
2132 struct fuse_file *ff;
2133 ff = rb_entry(p, struct fuse_file, polled_node);
2134 wake_up_interruptible_all(&ff->poll_wait);
2135
2136 p = rb_next(p);
2137 }
2138}
2139
69a53bf2
MS
2140/*
2141 * Abort all requests.
2142 *
2143 * Emergency exit in case of a malicious or accidental deadlock, or
2144 * just a hung filesystem.
2145 *
2146 * The same effect is usually achievable through killing the
2147 * filesystem daemon and all users of the filesystem. The exception
2148 * is the combination of an asynchronous request and the tricky
2149 * deadlock (see Documentation/filesystems/fuse.txt).
2150 *
0d8e84b0
MS
2151 * Request progression from one list to the next is prevented by
2152 * fc->connected being false.
69a53bf2
MS
2153 */
2154void fuse_abort_conn(struct fuse_conn *fc)
2155{
d7133114 2156 spin_lock(&fc->lock);
69a53bf2
MS
2157 if (fc->connected) {
2158 fc->connected = 0;
51eb01e7 2159 fc->blocked = 0;
9759bd51 2160 fuse_set_initialized(fc);
69a53bf2 2161 end_io_requests(fc);
595afaf9 2162 end_queued_requests(fc);
357ccf2b 2163 end_polls(fc);
69a53bf2 2164 wake_up_all(&fc->waitq);
51eb01e7 2165 wake_up_all(&fc->blocked_waitq);
385a17bf 2166 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
69a53bf2 2167 }
d7133114 2168 spin_unlock(&fc->lock);
69a53bf2 2169}
08cbf542 2170EXPORT_SYMBOL_GPL(fuse_abort_conn);
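/*
 * Note, not taken from this file: besides running on device release,
 * fuse_abort_conn() also backs the per-connection "abort" control file
 * described in Documentation/filesystems/fuse.txt, so a hung filesystem
 * can usually be torn down from userspace with something like
 *
 *	echo 1 > /sys/fs/fuse/connections/<dev>/abort
 *
 * where <dev> is the connection's device number; the exact path should
 * be checked against that document.
 */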
69a53bf2 2171
08cbf542 2172int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2173{
0720b315 2174 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2175 if (fc) {
ccd0a0bd
MS
2176 WARN_ON(!list_empty(&fc->io));
2177 WARN_ON(fc->fasync != NULL);
2178 fuse_abort_conn(fc);
bafa9654 2179 fuse_conn_put(fc);
385a17bf 2180 }
f543f253 2181
334f485d
MS
2182 return 0;
2183}
08cbf542 2184EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2185
385a17bf
JD
2186static int fuse_dev_fasync(int fd, struct file *file, int on)
2187{
2188 struct fuse_conn *fc = fuse_get_conn(file);
2189 if (!fc)
a87046d8 2190 return -EPERM;
385a17bf
JD
2191
2192 /* No locking - fasync_helper does its own locking */
2193 return fasync_helper(fd, file, on, &fc->fasync);
2194}
2195
4b6f5d20 2196const struct file_operations fuse_dev_operations = {
334f485d 2197 .owner = THIS_MODULE,
94e4fe2c 2198 .open = fuse_dev_open,
334f485d 2199 .llseek = no_llseek,
fbdbacca 2200 .read_iter = fuse_dev_read,
c3021629 2201 .splice_read = fuse_dev_splice_read,
fbdbacca 2202 .write_iter = fuse_dev_write,
dd3bb14f 2203 .splice_write = fuse_dev_splice_write,
334f485d
MS
2204 .poll = fuse_dev_poll,
2205 .release = fuse_dev_release,
385a17bf 2206 .fasync = fuse_dev_fasync,
334f485d 2207};
08cbf542 2208EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2209
2210static struct miscdevice fuse_miscdevice = {
2211 .minor = FUSE_MINOR,
2212 .name = "fuse",
2213 .fops = &fuse_dev_operations,
2214};
2215
2216int __init fuse_dev_init(void)
2217{
2218 int err = -ENOMEM;
2219 fuse_req_cachep = kmem_cache_create("fuse_request",
2220 sizeof(struct fuse_req),
20c2df83 2221 0, 0, NULL);
334f485d
MS
2222 if (!fuse_req_cachep)
2223 goto out;
2224
2225 err = misc_register(&fuse_miscdevice);
2226 if (err)
2227 goto out_cache_clean;
2228
2229 return 0;
2230
2231 out_cache_clean:
2232 kmem_cache_destroy(fuse_req_cachep);
2233 out:
2234 return err;
2235}
2236
2237void fuse_dev_cleanup(void)
2238{
2239 misc_deregister(&fuse_miscdevice);
2240 kmem_cache_destroy(fuse_req_cachep);
2241}