Git Repo - linux.git/blame - fs/fuse/dev.c
fuse: request_end(): do once
334f485d
MS
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <[email protected]>
334f485d
MS
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
dd3bb14f 19#include <linux/pipe_fs_i.h>
ce534fb0
MS
20#include <linux/swap.h>
21#include <linux/splice.h>
334f485d
MS
22
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
578454ff 24MODULE_ALIAS("devname:fuse");
334f485d 25
e18b890b 26static struct kmem_cache *fuse_req_cachep;
334f485d 27
8bfc016d 28static struct fuse_conn *fuse_get_conn(struct file *file)
334f485d 29{
0720b315
MS
30 /*
31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
34 return file->private_data;
334f485d
MS
35}
36
4250c066 37static void fuse_request_init(struct fuse_req *req, struct page **pages,
b2430d75 38 struct fuse_page_desc *page_descs,
4250c066 39 unsigned npages)
334f485d
MS
40{
41 memset(req, 0, sizeof(*req));
4250c066 42 memset(pages, 0, sizeof(*pages) * npages);
b2430d75 43 memset(page_descs, 0, sizeof(*page_descs) * npages);
334f485d 44 INIT_LIST_HEAD(&req->list);
a4d27e75 45 INIT_LIST_HEAD(&req->intr_entry);
334f485d
MS
46 init_waitqueue_head(&req->waitq);
47 atomic_set(&req->count, 1);
4250c066 48 req->pages = pages;
b2430d75 49 req->page_descs = page_descs;
4250c066 50 req->max_pages = npages;
33e14b4d 51 __set_bit(FR_PENDING, &req->flags);
334f485d
MS
52}
53
4250c066 54static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
334f485d 55{
4250c066
MP
56 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
57 if (req) {
58 struct page **pages;
b2430d75 59 struct fuse_page_desc *page_descs;
4250c066 60
b2430d75 61 if (npages <= FUSE_REQ_INLINE_PAGES) {
4250c066 62 pages = req->inline_pages;
b2430d75
MP
63 page_descs = req->inline_page_descs;
64 } else {
4250c066 65 pages = kmalloc(sizeof(struct page *) * npages, flags);
b2430d75
MP
66 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
67 npages, flags);
68 }
4250c066 69
b2430d75
MP
70 if (!pages || !page_descs) {
71 kfree(pages);
72 kfree(page_descs);
4250c066
MP
73 kmem_cache_free(fuse_req_cachep, req);
74 return NULL;
75 }
76
b2430d75 77 fuse_request_init(req, pages, page_descs, npages);
4250c066 78 }
334f485d
MS
79 return req;
80}
4250c066
MP
81
82struct fuse_req *fuse_request_alloc(unsigned npages)
83{
84 return __fuse_request_alloc(npages, GFP_KERNEL);
85}
08cbf542 86EXPORT_SYMBOL_GPL(fuse_request_alloc);
334f485d 87
4250c066 88struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
3be5a52b 89{
4250c066 90 return __fuse_request_alloc(npages, GFP_NOFS);
3be5a52b
MS
91}
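/*
 * The GFP_NOFS variant above exists for the writeback path, where a
 * GFP_KERNEL allocation could recurse into filesystem reclaim and
 * deadlock against the very request being allocated.
 */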
92
334f485d
MS
93void fuse_request_free(struct fuse_req *req)
94{
b2430d75 95 if (req->pages != req->inline_pages) {
4250c066 96 kfree(req->pages);
b2430d75
MP
97 kfree(req->page_descs);
98 }
334f485d
MS
99 kmem_cache_free(fuse_req_cachep, req);
100}
101
8bfc016d 102static void block_sigs(sigset_t *oldset)
334f485d
MS
103{
104 sigset_t mask;
105
106 siginitsetinv(&mask, sigmask(SIGKILL));
107 sigprocmask(SIG_BLOCK, &mask, oldset);
108}
109
8bfc016d 110static void restore_sigs(sigset_t *oldset)
334f485d
MS
111{
112 sigprocmask(SIG_SETMASK, oldset, NULL);
113}
114
36cf66ed 115void __fuse_get_request(struct fuse_req *req)
334f485d
MS
116{
117 atomic_inc(&req->count);
118}
119
120/* Must be called with > 1 refcount */
121static void __fuse_put_request(struct fuse_req *req)
122{
123 BUG_ON(atomic_read(&req->count) < 2);
124 atomic_dec(&req->count);
125}
126
33649c91
MS
127static void fuse_req_init_context(struct fuse_req *req)
128{
499dcf20
EB
129 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
130 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
33649c91
MS
131 req->in.h.pid = current->pid;
132}
133
9759bd51
MS
134void fuse_set_initialized(struct fuse_conn *fc)
135{
136 /* Make sure stores before this are seen on another CPU */
137 smp_wmb();
138 fc->initialized = 1;
139}
140
0aada884
MP
141static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
142{
143 return !fc->initialized || (for_background && fc->blocked);
144}
145
8b41e671
MP
146static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
147 bool for_background)
334f485d 148{
08a53cdc 149 struct fuse_req *req;
08a53cdc 150 int err;
9bc5ddda 151 atomic_inc(&fc->num_waiting);
0aada884
MP
152
153 if (fuse_block_alloc(fc, for_background)) {
154 sigset_t oldset;
155 int intr;
156
157 block_sigs(&oldset);
722d2bea 158 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
0aada884
MP
159 !fuse_block_alloc(fc, for_background));
160 restore_sigs(&oldset);
161 err = -EINTR;
162 if (intr)
163 goto out;
164 }
9759bd51
MS
165 /* Matches smp_wmb() in fuse_set_initialized() */
166 smp_rmb();
08a53cdc 167
51eb01e7
MS
168 err = -ENOTCONN;
169 if (!fc->connected)
170 goto out;
171
de155226
MS
172 err = -ECONNREFUSED;
173 if (fc->conn_error)
174 goto out;
175
b111c8c0 176 req = fuse_request_alloc(npages);
9bc5ddda 177 err = -ENOMEM;
722d2bea
MP
178 if (!req) {
179 if (for_background)
180 wake_up(&fc->blocked_waitq);
9bc5ddda 181 goto out;
722d2bea 182 }
334f485d 183
33649c91 184 fuse_req_init_context(req);
825d6d33
MS
185 __set_bit(FR_WAITING, &req->flags);
186 if (for_background)
187 __set_bit(FR_BACKGROUND, &req->flags);
188
334f485d 189 return req;
9bc5ddda
MS
190
191 out:
192 atomic_dec(&fc->num_waiting);
193 return ERR_PTR(err);
334f485d 194}
8b41e671
MP
195
196struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
197{
198 return __fuse_get_req(fc, npages, false);
199}
08cbf542 200EXPORT_SYMBOL_GPL(fuse_get_req);
334f485d 201
8b41e671
MP
202struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
203 unsigned npages)
204{
205 return __fuse_get_req(fc, npages, true);
206}
207EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
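/*
 * Illustrative caller pattern (a hedged sketch, not code from this
 * file): how fuse_get_req() and fuse_put_request() typically bracket a
 * synchronous request elsewhere in fs/fuse/.  The function name and
 * opcode parameter are invented for the example.
 */
static int example_send_sync(struct fuse_conn *fc, u32 opcode)
{
	int err;
	struct fuse_req *req = fuse_get_req(fc, 0);

	if (IS_ERR(req))
		return PTR_ERR(req); /* -EINTR, -ENOTCONN, -ECONNREFUSED, -ENOMEM */

	req->in.h.opcode = opcode;
	fuse_request_send(fc, req);	/* blocks until the answer arrives */
	err = req->out.h.error;
	fuse_put_request(fc, req);	/* drop the caller's reference */
	return err;
}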
208
33649c91
MS
209/*
210 * Return the request in fuse_file->reserved_req. However, that may
211 * currently be in use. If that is the case, wait for it to become
212 * available.
213 */
214static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
215 struct file *file)
216{
217 struct fuse_req *req = NULL;
218 struct fuse_file *ff = file->private_data;
219
220 do {
de5e3dec 221 wait_event(fc->reserved_req_waitq, ff->reserved_req);
33649c91
MS
222 spin_lock(&fc->lock);
223 if (ff->reserved_req) {
224 req = ff->reserved_req;
225 ff->reserved_req = NULL;
cb0942b8 226 req->stolen_file = get_file(file);
33649c91
MS
227 }
228 spin_unlock(&fc->lock);
229 } while (!req);
230
231 return req;
232}
233
234/*
235 * Put stolen request back into fuse_file->reserved_req
236 */
237static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
238{
239 struct file *file = req->stolen_file;
240 struct fuse_file *ff = file->private_data;
241
242 spin_lock(&fc->lock);
b2430d75 243 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
33649c91
MS
244 BUG_ON(ff->reserved_req);
245 ff->reserved_req = req;
de5e3dec 246 wake_up_all(&fc->reserved_req_waitq);
33649c91
MS
247 spin_unlock(&fc->lock);
248 fput(file);
249}
250
251/*
252 * Gets a request for a file operation, always succeeds
253 *
254 * This is used for sending the FLUSH request, which must get to
255 * userspace, due to POSIX locks which may need to be unlocked.
256 *
257 * If allocation fails due to OOM, use the reserved request in
258 * fuse_file.
259 *
260 * This is very unlikely to deadlock accidentally, since the
261 * filesystem should not have its own file open. If deadlock is
262 * intentional, it can still be broken by "aborting" the filesystem.
263 */
b111c8c0
MP
264struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
265 struct file *file)
33649c91
MS
266{
267 struct fuse_req *req;
268
269 atomic_inc(&fc->num_waiting);
0aada884 270 wait_event(fc->blocked_waitq, fc->initialized);
9759bd51
MS
271 /* Matches smp_wmb() in fuse_set_initialized() */
272 smp_rmb();
b111c8c0 273 req = fuse_request_alloc(0);
33649c91
MS
274 if (!req)
275 req = get_reserved_req(fc, file);
276
277 fuse_req_init_context(req);
825d6d33
MS
278 __set_bit(FR_WAITING, &req->flags);
279 __clear_bit(FR_BACKGROUND, &req->flags);
33649c91
MS
280 return req;
281}
282
334f485d 283void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
7128ec2a
MS
284{
285 if (atomic_dec_and_test(&req->count)) {
825d6d33 286 if (test_bit(FR_BACKGROUND, &req->flags)) {
722d2bea
MP
287 /*
288 * We get here in the unlikely case that a background
289 * request was allocated but not sent
290 */
291 spin_lock(&fc->lock);
292 if (!fc->blocked)
293 wake_up(&fc->blocked_waitq);
294 spin_unlock(&fc->lock);
295 }
296
825d6d33
MS
297 if (test_bit(FR_WAITING, &req->flags)) {
298 __clear_bit(FR_WAITING, &req->flags);
9bc5ddda 299 atomic_dec(&fc->num_waiting);
73e0e738 300 }
33649c91
MS
301
302 if (req->stolen_file)
303 put_reserved_req(fc, req);
304 else
305 fuse_request_free(req);
7128ec2a
MS
306 }
307}
08cbf542 308EXPORT_SYMBOL_GPL(fuse_put_request);
7128ec2a 309
d12def1b
MS
310static unsigned len_args(unsigned numargs, struct fuse_arg *args)
311{
312 unsigned nbytes = 0;
313 unsigned i;
314
315 for (i = 0; i < numargs; i++)
316 nbytes += args[i].size;
317
318 return nbytes;
319}
320
f88996a9 321static u64 fuse_get_unique(struct fuse_iqueue *fiq)
d12def1b 322{
f88996a9 323 return ++fiq->reqctr;
d12def1b
MS
324}
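/*
 * Note that fuse_get_unique() and queue_request() both rely on the
 * caller holding fiq->waitq.lock; that is what serializes ->reqctr and
 * the pending list (see flush_bg_queue() and __fuse_request_send()).
 */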
325
f88996a9 326static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
d12def1b 327{
d12def1b
MS
328 req->in.h.len = sizeof(struct fuse_in_header) +
329 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
f88996a9 330 list_add_tail(&req->list, &fiq->pending);
4ce60812 331 wake_up_locked(&fiq->waitq);
f88996a9 332 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
d12def1b
MS
333}
334
07e77dca
MS
335void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
336 u64 nodeid, u64 nlookup)
337{
f88996a9
MS
338 struct fuse_iqueue *fiq = &fc->iq;
339
02c048b9
MS
340 forget->forget_one.nodeid = nodeid;
341 forget->forget_one.nlookup = nlookup;
07e77dca 342
4ce60812 343 spin_lock(&fiq->waitq.lock);
e16714d8 344 if (fiq->connected) {
f88996a9
MS
345 fiq->forget_list_tail->next = forget;
346 fiq->forget_list_tail = forget;
4ce60812 347 wake_up_locked(&fiq->waitq);
f88996a9 348 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5dfcc87f
MS
349 } else {
350 kfree(forget);
351 }
4ce60812 352 spin_unlock(&fiq->waitq.lock);
07e77dca
MS
353}
354
d12def1b
MS
355static void flush_bg_queue(struct fuse_conn *fc)
356{
7a6d3c8b 357 while (fc->active_background < fc->max_background &&
d12def1b
MS
358 !list_empty(&fc->bg_queue)) {
359 struct fuse_req *req;
f88996a9 360 struct fuse_iqueue *fiq = &fc->iq;
d12def1b
MS
361
362 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
363 list_del(&req->list);
364 fc->active_background++;
4ce60812 365 spin_lock(&fiq->waitq.lock);
f88996a9
MS
366 req->in.h.unique = fuse_get_unique(fiq);
367 queue_request(fiq, req);
4ce60812 368 spin_unlock(&fiq->waitq.lock);
d12def1b
MS
369 }
370}
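/*
 * In short: background requests wait on fc->bg_queue until fewer than
 * max_background of them are in flight; only then do they get a unique
 * ID and move to the input queue.  fc->lock protects bg_queue and the
 * counters here.
 */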
371
334f485d
MS
372/*
373 * This function is called when a request is finished. Either a reply
f9a2842e 374 * has arrived or it was aborted (and not yet sent) or some error
f43b155a 375 * occurred during communication with userspace, or the device file
51eb01e7
MS
376 * was closed. The requester thread is woken up (if still waiting),
377 * the 'end' callback is called if given, and the reference to the
378 * request is released
7128ec2a 379 *
d7133114 380 * Called with fc->lock, unlocks it
334f485d
MS
381 */
382static void request_end(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2 383__releases(fc->lock)
334f485d 384{
4ce60812 385 struct fuse_iqueue *fiq = &fc->iq;
51eb01e7 386 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
365ae710
MS
387
388 if (test_and_set_bit(FR_FINISHED, &req->flags)) {
389 spin_unlock(&fc->lock);
390 return;
391 }
392
51eb01e7 393 req->end = NULL;
4ce60812 394 spin_lock(&fiq->waitq.lock);
0d8e84b0 395 list_del_init(&req->intr_entry);
4ce60812 396 spin_unlock(&fiq->waitq.lock);
33e14b4d
MS
397 WARN_ON(test_bit(FR_PENDING, &req->flags));
398 WARN_ON(test_bit(FR_SENT, &req->flags));
825d6d33
MS
399 if (test_bit(FR_BACKGROUND, &req->flags)) {
400 clear_bit(FR_BACKGROUND, &req->flags);
722d2bea 401 if (fc->num_background == fc->max_background)
51eb01e7 402 fc->blocked = 0;
722d2bea
MP
403
404 /* Wake up next waiter, if any */
3c18ef81 405 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
722d2bea
MP
406 wake_up(&fc->blocked_waitq);
407
7a6d3c8b 408 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 409 fc->connected && fc->bdi_initialized) {
8aa7e847
JA
410 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
411 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
f92b99b9 412 }
51eb01e7 413 fc->num_background--;
d12def1b
MS
414 fc->active_background--;
415 flush_bg_queue(fc);
334f485d 416 }
51eb01e7 417 spin_unlock(&fc->lock);
51eb01e7
MS
418 wake_up(&req->waitq);
419 if (end)
420 end(fc, req);
e9bb09dd 421 fuse_put_request(fc, req);
334f485d
MS
422}
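/*
 * Hedged sketch of an 'end' callback as invoked by request_end() above,
 * modeled loosely on the callbacks in fs/fuse/file.c; the name and the
 * message are illustrative only.
 */
static void example_end(struct fuse_conn *fc, struct fuse_req *req)
{
	/*
	 * Runs at most once per request: request_end() claims FR_FINISHED
	 * with test_and_set_bit() and has already dropped fc->lock.
	 */
	if (req->out.h.error)
		pr_debug("fuse: request %llu ended with %d\n",
			 (unsigned long long) req->in.h.unique,
			 req->out.h.error);
}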
423
f88996a9 424static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
a4d27e75 425{
4ce60812 426 spin_lock(&fiq->waitq.lock);
8f7bb368
MS
427 if (list_empty(&req->intr_entry)) {
428 list_add_tail(&req->intr_entry, &fiq->interrupts);
429 wake_up_locked(&fiq->waitq);
430 }
4ce60812 431 spin_unlock(&fiq->waitq.lock);
f88996a9 432 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
a4d27e75
MS
433}
434
7c352bdf 435static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
334f485d 436{
4ce60812 437 struct fuse_iqueue *fiq = &fc->iq;
c4775267
MS
438 int err;
439
a4d27e75
MS
440 if (!fc->no_interrupt) {
441 /* Any signal may interrupt this */
c4775267 442 err = wait_event_interruptible(req->waitq,
33e14b4d 443 test_bit(FR_FINISHED, &req->flags));
c4775267 444 if (!err)
a4d27e75
MS
445 return;
446
825d6d33 447 set_bit(FR_INTERRUPTED, &req->flags);
8f7bb368
MS
448 /* matches barrier in fuse_dev_do_read() */
449 smp_mb__after_atomic();
33e14b4d 450 if (test_bit(FR_SENT, &req->flags))
4ce60812 451 queue_interrupt(fiq, req);
a4d27e75
MS
452 }
453
825d6d33 454 if (!test_bit(FR_FORCE, &req->flags)) {
a4d27e75
MS
455 sigset_t oldset;
456
457 /* Only fatal signals may interrupt this */
51eb01e7 458 block_sigs(&oldset);
c4775267 459 err = wait_event_interruptible(req->waitq,
33e14b4d 460 test_bit(FR_FINISHED, &req->flags));
51eb01e7 461 restore_sigs(&oldset);
a131de0a 462
c4775267 463 if (!err)
a131de0a
MS
464 return;
465
4ce60812 466 spin_lock(&fiq->waitq.lock);
a131de0a 467 /* Request is not yet in userspace, bail out */
33e14b4d 468 if (test_bit(FR_PENDING, &req->flags)) {
a131de0a 469 list_del(&req->list);
4ce60812 470 spin_unlock(&fiq->waitq.lock);
a131de0a
MS
471 __fuse_put_request(req);
472 req->out.h.error = -EINTR;
473 return;
474 }
4ce60812 475 spin_unlock(&fiq->waitq.lock);
51eb01e7 476 }
334f485d 477
a131de0a
MS
478 /*
479 * Either request is already in userspace, or it was forced.
480 * Wait it out.
481 */
33e14b4d 482 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
334f485d
MS
483}
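/*
 * To summarize the fall-through above: the answer is awaited first
 * interruptibly (any signal queues a FUSE_INTERRUPT), then with all but
 * fatal signals blocked (SIGKILL may still pull a not-yet-read request
 * off the pending list), and finally uninterruptibly once the request
 * has reached userspace or was forced.
 */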
484
6a4e922c 485static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
334f485d 486{
e16714d8
MS
487 struct fuse_iqueue *fiq = &fc->iq;
488
825d6d33 489 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
4ce60812 490 spin_lock(&fiq->waitq.lock);
e16714d8 491 if (!fiq->connected) {
4ce60812 492 spin_unlock(&fiq->waitq.lock);
334f485d 493 req->out.h.error = -ENOTCONN;
c4775267 494 } else {
f88996a9
MS
495 req->in.h.unique = fuse_get_unique(fiq);
496 queue_request(fiq, req);
334f485d
MS
497 /* acquire extra reference, since request is still needed
498 after request_end() */
499 __fuse_get_request(req);
4ce60812 500 spin_unlock(&fiq->waitq.lock);
334f485d 501
7c352bdf 502 request_wait_answer(fc, req);
c4775267
MS
503 /* Pairs with smp_wmb() in request_end() */
504 smp_rmb();
334f485d 505 }
334f485d 506}
6a4e922c
EW
507
508void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
509{
825d6d33
MS
510 __set_bit(FR_ISREPLY, &req->flags);
511 if (!test_bit(FR_WAITING, &req->flags)) {
512 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
513 atomic_inc(&fc->num_waiting);
514 }
6a4e922c
EW
515 __fuse_request_send(fc, req);
516}
08cbf542 517EXPORT_SYMBOL_GPL(fuse_request_send);
334f485d 518
21f62174
MS
519static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
520{
521 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
522 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
523
524 if (fc->minor < 9) {
525 switch (args->in.h.opcode) {
526 case FUSE_LOOKUP:
527 case FUSE_CREATE:
528 case FUSE_MKNOD:
529 case FUSE_MKDIR:
530 case FUSE_SYMLINK:
531 case FUSE_LINK:
532 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
533 break;
534 case FUSE_GETATTR:
535 case FUSE_SETATTR:
536 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
537 break;
538 }
539 }
540 if (fc->minor < 12) {
541 switch (args->in.h.opcode) {
542 case FUSE_CREATE:
543 args->in.args[0].size = sizeof(struct fuse_open_in);
544 break;
545 case FUSE_MKNOD:
546 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
547 break;
548 }
549 }
550}
551
7078187a
MS
552ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
553{
554 struct fuse_req *req;
555 ssize_t ret;
556
557 req = fuse_get_req(fc, 0);
558 if (IS_ERR(req))
559 return PTR_ERR(req);
560
21f62174
MS
561 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
562 fuse_adjust_compat(fc, args);
563
7078187a
MS
564 req->in.h.opcode = args->in.h.opcode;
565 req->in.h.nodeid = args->in.h.nodeid;
566 req->in.numargs = args->in.numargs;
567 memcpy(req->in.args, args->in.args,
568 args->in.numargs * sizeof(struct fuse_in_arg));
569 req->out.argvar = args->out.argvar;
570 req->out.numargs = args->out.numargs;
571 memcpy(req->out.args, args->out.args,
572 args->out.numargs * sizeof(struct fuse_arg));
573 fuse_request_send(fc, req);
574 ret = req->out.h.error;
575 if (!ret && args->out.argvar) {
576 BUG_ON(args->out.numargs != 1);
577 ret = req->out.args[0].size;
578 }
579 fuse_put_request(fc, req);
580
581 return ret;
582}
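/*
 * Hedged usage sketch for fuse_simple_request(), patterned on the
 * GETATTR path in fs/fuse/dir.c; the wrapper name is invented.
 * FUSE_ARGS() declares a zero-initialized struct fuse_args on the
 * stack.
 */
static int example_getattr(struct fuse_conn *fc, u64 nodeid,
			   struct fuse_attr_out *outarg)
{
	FUSE_ARGS(args);
	struct fuse_getattr_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	args.in.h.opcode = FUSE_GETATTR;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(struct fuse_attr_out);
	args.out.args[0].value = outarg;
	return fuse_simple_request(fc, &args);
}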
583
f0139aa8
MS
584/*
585 * Called under fc->lock
586 *
587 * fc->connected must have been checked previously
588 */
589void fuse_request_send_background_locked(struct fuse_conn *fc,
590 struct fuse_req *req)
d12def1b 591{
825d6d33
MS
592 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
593 if (!test_bit(FR_WAITING, &req->flags)) {
594 __set_bit(FR_WAITING, &req->flags);
5437f241
MS
595 atomic_inc(&fc->num_waiting);
596 }
825d6d33 597 __set_bit(FR_ISREPLY, &req->flags);
d12def1b 598 fc->num_background++;
7a6d3c8b 599 if (fc->num_background == fc->max_background)
d12def1b 600 fc->blocked = 1;
7a6d3c8b 601 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 602 fc->bdi_initialized) {
8aa7e847
JA
603 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
604 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
d12def1b
MS
605 }
606 list_add_tail(&req->list, &fc->bg_queue);
607 flush_bg_queue(fc);
608}
609
f0139aa8 610void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
334f485d 611{
42dc6211 612 BUG_ON(!req->end);
d7133114 613 spin_lock(&fc->lock);
1e9a4ed9 614 if (fc->connected) {
f0139aa8 615 fuse_request_send_background_locked(fc, req);
d7133114 616 spin_unlock(&fc->lock);
334f485d 617 } else {
42dc6211 618 spin_unlock(&fc->lock);
334f485d 619 req->out.h.error = -ENOTCONN;
42dc6211
MS
620 req->end(fc, req);
621 fuse_put_request(fc, req);
334f485d
MS
622 }
623}
08cbf542 624EXPORT_SYMBOL_GPL(fuse_request_send_background);
334f485d 625
2d45ba38
MS
626static int fuse_request_send_notify_reply(struct fuse_conn *fc,
627 struct fuse_req *req, u64 unique)
628{
629 int err = -ENODEV;
f88996a9 630 struct fuse_iqueue *fiq = &fc->iq;
2d45ba38 631
825d6d33 632 __clear_bit(FR_ISREPLY, &req->flags);
2d45ba38 633 req->in.h.unique = unique;
4ce60812 634 spin_lock(&fiq->waitq.lock);
e16714d8 635 if (fiq->connected) {
f88996a9 636 queue_request(fiq, req);
2d45ba38
MS
637 err = 0;
638 }
4ce60812 639 spin_unlock(&fiq->waitq.lock);
2d45ba38
MS
640
641 return err;
642}
643
0b05b183
AA
644void fuse_force_forget(struct file *file, u64 nodeid)
645{
6131ffaa 646 struct inode *inode = file_inode(file);
0b05b183
AA
647 struct fuse_conn *fc = get_fuse_conn(inode);
648 struct fuse_req *req;
649 struct fuse_forget_in inarg;
650
651 memset(&inarg, 0, sizeof(inarg));
652 inarg.nlookup = 1;
b111c8c0 653 req = fuse_get_req_nofail_nopages(fc, file);
0b05b183
AA
654 req->in.h.opcode = FUSE_FORGET;
655 req->in.h.nodeid = nodeid;
656 req->in.numargs = 1;
657 req->in.args[0].size = sizeof(inarg);
658 req->in.args[0].value = &inarg;
825d6d33 659 __clear_bit(FR_ISREPLY, &req->flags);
6a4e922c
EW
660 __fuse_request_send(fc, req);
661 /* ignore errors */
662 fuse_put_request(fc, req);
0b05b183
AA
663}
664
334f485d
MS
665/*
666 * Lock the request. Up to the next unlock_request() there mustn't be
667 * anything that could cause a page-fault. If the request was already
f9a2842e 668 * aborted bail out.
334f485d 669 */
dc00809a 670static int lock_request(struct fuse_req *req)
334f485d
MS
671{
672 int err = 0;
673 if (req) {
dc00809a 674 spin_lock(&req->waitq.lock);
825d6d33 675 if (test_bit(FR_ABORTED, &req->flags))
334f485d
MS
676 err = -ENOENT;
677 else
825d6d33 678 set_bit(FR_LOCKED, &req->flags);
dc00809a 679 spin_unlock(&req->waitq.lock);
334f485d
MS
680 }
681 return err;
682}
683
684/*
0d8e84b0
MS
685 * Unlock request. If it was aborted while locked, caller is responsible
686 * for unlocking and ending the request.
334f485d 687 */
dc00809a 688static int unlock_request(struct fuse_req *req)
334f485d 689{
0d8e84b0 690 int err = 0;
334f485d 691 if (req) {
dc00809a 692 spin_lock(&req->waitq.lock);
825d6d33 693 if (test_bit(FR_ABORTED, &req->flags))
0d8e84b0
MS
694 err = -ENOENT;
695 else
825d6d33 696 clear_bit(FR_LOCKED, &req->flags);
dc00809a 697 spin_unlock(&req->waitq.lock);
334f485d 698 }
0d8e84b0 699 return err;
334f485d
MS
700}
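/*
 * The copy helpers below drop FR_LOCKED (unlock_request()) around any
 * step that may fault or sleep and retake it (lock_request()) before
 * touching the request's pages again; both transitions re-check
 * FR_ABORTED, so an abort is noticed at the next opportunity.
 */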
701
702struct fuse_copy_state {
703 int write;
704 struct fuse_req *req;
6c09e94a 705 struct iov_iter *iter;
dd3bb14f
MS
706 struct pipe_buffer *pipebufs;
707 struct pipe_buffer *currbuf;
708 struct pipe_inode_info *pipe;
334f485d 709 unsigned long nr_segs;
334f485d 710 struct page *pg;
334f485d 711 unsigned len;
c55a01d3 712 unsigned offset;
ce534fb0 713 unsigned move_pages:1;
334f485d
MS
714};
715
dc00809a 716static void fuse_copy_init(struct fuse_copy_state *cs, int write,
6c09e94a 717 struct iov_iter *iter)
334f485d
MS
718{
719 memset(cs, 0, sizeof(*cs));
720 cs->write = write;
6c09e94a 721 cs->iter = iter;
334f485d
MS
722}
723
724/* Unmap and put previous page of userspace buffer */
8bfc016d 725static void fuse_copy_finish(struct fuse_copy_state *cs)
334f485d 726{
dd3bb14f
MS
727 if (cs->currbuf) {
728 struct pipe_buffer *buf = cs->currbuf;
729
c55a01d3 730 if (cs->write)
c3021629 731 buf->len = PAGE_SIZE - cs->len;
dd3bb14f 732 cs->currbuf = NULL;
c55a01d3 733 } else if (cs->pg) {
334f485d
MS
734 if (cs->write) {
735 flush_dcache_page(cs->pg);
736 set_page_dirty_lock(cs->pg);
737 }
738 put_page(cs->pg);
334f485d 739 }
c55a01d3 740 cs->pg = NULL;
334f485d
MS
741}
742
743/*
744 * Get another pageful of userspace buffer, map it to kernel
745 * address space, and lock the request
746 */
747static int fuse_copy_fill(struct fuse_copy_state *cs)
748{
c55a01d3 749 struct page *page;
334f485d
MS
750 int err;
751
dc00809a 752 err = unlock_request(cs->req);
0d8e84b0
MS
753 if (err)
754 return err;
755
334f485d 756 fuse_copy_finish(cs);
dd3bb14f
MS
757 if (cs->pipebufs) {
758 struct pipe_buffer *buf = cs->pipebufs;
759
c3021629
MS
760 if (!cs->write) {
761 err = buf->ops->confirm(cs->pipe, buf);
762 if (err)
763 return err;
764
765 BUG_ON(!cs->nr_segs);
766 cs->currbuf = buf;
c55a01d3
MS
767 cs->pg = buf->page;
768 cs->offset = buf->offset;
c3021629 769 cs->len = buf->len;
c3021629
MS
770 cs->pipebufs++;
771 cs->nr_segs--;
772 } else {
c3021629
MS
773 if (cs->nr_segs == cs->pipe->buffers)
774 return -EIO;
775
776 page = alloc_page(GFP_HIGHUSER);
777 if (!page)
778 return -ENOMEM;
779
780 buf->page = page;
781 buf->offset = 0;
782 buf->len = 0;
783
784 cs->currbuf = buf;
c55a01d3
MS
785 cs->pg = page;
786 cs->offset = 0;
c3021629
MS
787 cs->len = PAGE_SIZE;
788 cs->pipebufs++;
789 cs->nr_segs++;
790 }
dd3bb14f 791 } else {
6c09e94a
AV
792 size_t off;
793 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
dd3bb14f
MS
794 if (err < 0)
795 return err;
6c09e94a
AV
796 BUG_ON(!err);
797 cs->len = err;
798 cs->offset = off;
c55a01d3 799 cs->pg = page;
6c09e94a
AV
800 cs->offset = off;
801 iov_iter_advance(cs->iter, err);
334f485d 802 }
334f485d 803
dc00809a 804 return lock_request(cs->req);
334f485d
MS
805}
806
807/* Do as much copy to/from userspace buffer as we can */
8bfc016d 808static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
334f485d
MS
809{
810 unsigned ncpy = min(*size, cs->len);
811 if (val) {
c55a01d3
MS
812 void *pgaddr = kmap_atomic(cs->pg);
813 void *buf = pgaddr + cs->offset;
814
334f485d 815 if (cs->write)
c55a01d3 816 memcpy(buf, *val, ncpy);
334f485d 817 else
c55a01d3
MS
818 memcpy(*val, buf, ncpy);
819
820 kunmap_atomic(pgaddr);
334f485d
MS
821 *val += ncpy;
822 }
823 *size -= ncpy;
824 cs->len -= ncpy;
c55a01d3 825 cs->offset += ncpy;
334f485d
MS
826 return ncpy;
827}
828
ce534fb0
MS
829static int fuse_check_page(struct page *page)
830{
831 if (page_mapcount(page) ||
832 page->mapping != NULL ||
833 page_count(page) != 1 ||
834 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
835 ~(1 << PG_locked |
836 1 << PG_referenced |
837 1 << PG_uptodate |
838 1 << PG_lru |
839 1 << PG_active |
840 1 << PG_reclaim))) {
841 printk(KERN_WARNING "fuse: trying to steal weird page\n");
842 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
843 return 1;
844 }
845 return 0;
846}
847
848static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
849{
850 int err;
851 struct page *oldpage = *pagep;
852 struct page *newpage;
853 struct pipe_buffer *buf = cs->pipebufs;
ce534fb0 854
dc00809a 855 err = unlock_request(cs->req);
0d8e84b0
MS
856 if (err)
857 return err;
858
ce534fb0
MS
859 fuse_copy_finish(cs);
860
861 err = buf->ops->confirm(cs->pipe, buf);
862 if (err)
863 return err;
864
865 BUG_ON(!cs->nr_segs);
866 cs->currbuf = buf;
867 cs->len = buf->len;
868 cs->pipebufs++;
869 cs->nr_segs--;
870
871 if (cs->len != PAGE_SIZE)
872 goto out_fallback;
873
874 if (buf->ops->steal(cs->pipe, buf) != 0)
875 goto out_fallback;
876
877 newpage = buf->page;
878
aa991b3b
MS
879 if (!PageUptodate(newpage))
880 SetPageUptodate(newpage);
ce534fb0
MS
881
882 ClearPageMappedToDisk(newpage);
883
884 if (fuse_check_page(newpage) != 0)
885 goto out_fallback_unlock;
886
ce534fb0
MS
887 /*
888 * This is a new and locked page, it shouldn't be mapped or
889 * have any special flags on it
890 */
891 if (WARN_ON(page_mapped(oldpage)))
892 goto out_fallback_unlock;
893 if (WARN_ON(page_has_private(oldpage)))
894 goto out_fallback_unlock;
895 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
896 goto out_fallback_unlock;
897 if (WARN_ON(PageMlocked(oldpage)))
898 goto out_fallback_unlock;
899
ef6a3c63 900 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
ce534fb0 901 if (err) {
ef6a3c63
MS
902 unlock_page(newpage);
903 return err;
ce534fb0 904 }
ef6a3c63 905
ce534fb0
MS
906 page_cache_get(newpage);
907
908 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
909 lru_cache_add_file(newpage);
910
911 err = 0;
dc00809a 912 spin_lock(&cs->req->waitq.lock);
825d6d33 913 if (test_bit(FR_ABORTED, &cs->req->flags))
ce534fb0
MS
914 err = -ENOENT;
915 else
916 *pagep = newpage;
dc00809a 917 spin_unlock(&cs->req->waitq.lock);
ce534fb0
MS
918
919 if (err) {
920 unlock_page(newpage);
921 page_cache_release(newpage);
922 return err;
923 }
924
925 unlock_page(oldpage);
926 page_cache_release(oldpage);
927 cs->len = 0;
928
929 return 0;
930
931out_fallback_unlock:
932 unlock_page(newpage);
933out_fallback:
c55a01d3
MS
934 cs->pg = buf->page;
935 cs->offset = buf->offset;
ce534fb0 936
dc00809a 937 err = lock_request(cs->req);
ce534fb0
MS
938 if (err)
939 return err;
940
941 return 1;
942}
943
c3021629
MS
944static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
945 unsigned offset, unsigned count)
946{
947 struct pipe_buffer *buf;
0d8e84b0 948 int err;
c3021629
MS
949
950 if (cs->nr_segs == cs->pipe->buffers)
951 return -EIO;
952
dc00809a 953 err = unlock_request(cs->req);
0d8e84b0
MS
954 if (err)
955 return err;
956
c3021629
MS
957 fuse_copy_finish(cs);
958
959 buf = cs->pipebufs;
960 page_cache_get(page);
961 buf->page = page;
962 buf->offset = offset;
963 buf->len = count;
964
965 cs->pipebufs++;
966 cs->nr_segs++;
967 cs->len = 0;
968
969 return 0;
970}
971
334f485d
MS
972/*
973 * Copy a page in the request to/from the userspace buffer. Must be
974 * done atomically
975 */
ce534fb0 976static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
8bfc016d 977 unsigned offset, unsigned count, int zeroing)
334f485d 978{
ce534fb0
MS
979 int err;
980 struct page *page = *pagep;
981
b6777c40
MS
982 if (page && zeroing && count < PAGE_SIZE)
983 clear_highpage(page);
984
334f485d 985 while (count) {
c3021629
MS
986 if (cs->write && cs->pipebufs && page) {
987 return fuse_ref_page(cs, page, offset, count);
988 } else if (!cs->len) {
ce534fb0
MS
989 if (cs->move_pages && page &&
990 offset == 0 && count == PAGE_SIZE) {
991 err = fuse_try_move_page(cs, pagep);
992 if (err <= 0)
993 return err;
994 } else {
995 err = fuse_copy_fill(cs);
996 if (err)
997 return err;
998 }
1729a16c 999 }
334f485d 1000 if (page) {
2408f6ef 1001 void *mapaddr = kmap_atomic(page);
334f485d
MS
1002 void *buf = mapaddr + offset;
1003 offset += fuse_copy_do(cs, &buf, &count);
2408f6ef 1004 kunmap_atomic(mapaddr);
334f485d
MS
1005 } else
1006 offset += fuse_copy_do(cs, NULL, &count);
1007 }
1008 if (page && !cs->write)
1009 flush_dcache_page(page);
1010 return 0;
1011}
1012
1013/* Copy pages in the request to/from userspace buffer */
1014static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1015 int zeroing)
1016{
1017 unsigned i;
1018 struct fuse_req *req = cs->req;
334f485d
MS
1019
1020 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
ce534fb0 1021 int err;
85f40aec
MP
1022 unsigned offset = req->page_descs[i].offset;
1023 unsigned count = min(nbytes, req->page_descs[i].length);
ce534fb0
MS
1024
1025 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1026 zeroing);
334f485d
MS
1027 if (err)
1028 return err;
1029
1030 nbytes -= count;
334f485d
MS
1031 }
1032 return 0;
1033}
1034
1035/* Copy a single argument in the request to/from userspace buffer */
1036static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1037{
1038 while (size) {
1729a16c
MS
1039 if (!cs->len) {
1040 int err = fuse_copy_fill(cs);
1041 if (err)
1042 return err;
1043 }
334f485d
MS
1044 fuse_copy_do(cs, &val, &size);
1045 }
1046 return 0;
1047}
1048
1049/* Copy request arguments to/from userspace buffer */
1050static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1051 unsigned argpages, struct fuse_arg *args,
1052 int zeroing)
1053{
1054 int err = 0;
1055 unsigned i;
1056
1057 for (i = 0; !err && i < numargs; i++) {
1058 struct fuse_arg *arg = &args[i];
1059 if (i == numargs - 1 && argpages)
1060 err = fuse_copy_pages(cs, arg->size, zeroing);
1061 else
1062 err = fuse_copy_one(cs, arg->value, arg->size);
1063 }
1064 return err;
1065}
1066
f88996a9 1067static int forget_pending(struct fuse_iqueue *fiq)
07e77dca 1068{
f88996a9 1069 return fiq->forget_list_head.next != NULL;
07e77dca
MS
1070}
1071
f88996a9 1072static int request_pending(struct fuse_iqueue *fiq)
a4d27e75 1073{
f88996a9
MS
1074 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1075 forget_pending(fiq);
a4d27e75
MS
1076}
1077
a4d27e75
MS
1078/*
1079 * Transfer an interrupt request to userspace
1080 *
1081 * Unlike other requests this is assembled on demand, without a need
1082 * to allocate a separate fuse_req structure.
1083 *
fd22d62e 1084 * Called with fiq->waitq.lock held, releases it
a4d27e75 1085 */
fd22d62e
MS
1086static int fuse_read_interrupt(struct fuse_iqueue *fiq,
1087 struct fuse_copy_state *cs,
c3021629 1088 size_t nbytes, struct fuse_req *req)
fd22d62e 1089__releases(fiq->waitq.lock)
a4d27e75 1090{
a4d27e75
MS
1091 struct fuse_in_header ih;
1092 struct fuse_interrupt_in arg;
1093 unsigned reqsize = sizeof(ih) + sizeof(arg);
1094 int err;
1095
1096 list_del_init(&req->intr_entry);
4ce60812 1097 req->intr_unique = fuse_get_unique(fiq);
a4d27e75
MS
1098 memset(&ih, 0, sizeof(ih));
1099 memset(&arg, 0, sizeof(arg));
1100 ih.len = reqsize;
1101 ih.opcode = FUSE_INTERRUPT;
1102 ih.unique = req->intr_unique;
1103 arg.unique = req->in.h.unique;
1104
4ce60812 1105 spin_unlock(&fiq->waitq.lock);
c3021629 1106 if (nbytes < reqsize)
a4d27e75
MS
1107 return -EINVAL;
1108
c3021629 1109 err = fuse_copy_one(cs, &ih, sizeof(ih));
a4d27e75 1110 if (!err)
c3021629
MS
1111 err = fuse_copy_one(cs, &arg, sizeof(arg));
1112 fuse_copy_finish(cs);
a4d27e75
MS
1113
1114 return err ? err : reqsize;
1115}
1116
f88996a9 1117static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
02c048b9
MS
1118 unsigned max,
1119 unsigned *countp)
07e77dca 1120{
f88996a9 1121 struct fuse_forget_link *head = fiq->forget_list_head.next;
02c048b9
MS
1122 struct fuse_forget_link **newhead = &head;
1123 unsigned count;
07e77dca 1124
02c048b9
MS
1125 for (count = 0; *newhead != NULL && count < max; count++)
1126 newhead = &(*newhead)->next;
1127
f88996a9 1128 fiq->forget_list_head.next = *newhead;
02c048b9 1129 *newhead = NULL;
f88996a9
MS
1130 if (fiq->forget_list_head.next == NULL)
1131 fiq->forget_list_tail = &fiq->forget_list_head;
07e77dca 1132
02c048b9
MS
1133 if (countp != NULL)
1134 *countp = count;
1135
1136 return head;
07e77dca
MS
1137}
1138
fd22d62e 1139static int fuse_read_single_forget(struct fuse_iqueue *fiq,
07e77dca
MS
1140 struct fuse_copy_state *cs,
1141 size_t nbytes)
fd22d62e 1142__releases(fiq->waitq.lock)
07e77dca
MS
1143{
1144 int err;
f88996a9 1145 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
07e77dca 1146 struct fuse_forget_in arg = {
02c048b9 1147 .nlookup = forget->forget_one.nlookup,
07e77dca
MS
1148 };
1149 struct fuse_in_header ih = {
1150 .opcode = FUSE_FORGET,
02c048b9 1151 .nodeid = forget->forget_one.nodeid,
f88996a9 1152 .unique = fuse_get_unique(fiq),
07e77dca
MS
1153 .len = sizeof(ih) + sizeof(arg),
1154 };
1155
4ce60812 1156 spin_unlock(&fiq->waitq.lock);
07e77dca
MS
1157 kfree(forget);
1158 if (nbytes < ih.len)
1159 return -EINVAL;
1160
1161 err = fuse_copy_one(cs, &ih, sizeof(ih));
1162 if (!err)
1163 err = fuse_copy_one(cs, &arg, sizeof(arg));
1164 fuse_copy_finish(cs);
1165
1166 if (err)
1167 return err;
1168
1169 return ih.len;
1170}
1171
fd22d62e 1172static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
02c048b9 1173 struct fuse_copy_state *cs, size_t nbytes)
fd22d62e 1174__releases(fiq->waitq.lock)
02c048b9
MS
1175{
1176 int err;
1177 unsigned max_forgets;
1178 unsigned count;
1179 struct fuse_forget_link *head;
1180 struct fuse_batch_forget_in arg = { .count = 0 };
1181 struct fuse_in_header ih = {
1182 .opcode = FUSE_BATCH_FORGET,
f88996a9 1183 .unique = fuse_get_unique(fiq),
02c048b9
MS
1184 .len = sizeof(ih) + sizeof(arg),
1185 };
1186
1187 if (nbytes < ih.len) {
4ce60812 1188 spin_unlock(&fiq->waitq.lock);
02c048b9
MS
1189 return -EINVAL;
1190 }
1191
1192 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
f88996a9 1193 head = dequeue_forget(fiq, max_forgets, &count);
4ce60812 1194 spin_unlock(&fiq->waitq.lock);
02c048b9
MS
1195
1196 arg.count = count;
1197 ih.len += count * sizeof(struct fuse_forget_one);
1198 err = fuse_copy_one(cs, &ih, sizeof(ih));
1199 if (!err)
1200 err = fuse_copy_one(cs, &arg, sizeof(arg));
1201
1202 while (head) {
1203 struct fuse_forget_link *forget = head;
1204
1205 if (!err) {
1206 err = fuse_copy_one(cs, &forget->forget_one,
1207 sizeof(forget->forget_one));
1208 }
1209 head = forget->next;
1210 kfree(forget);
1211 }
1212
1213 fuse_copy_finish(cs);
1214
1215 if (err)
1216 return err;
1217
1218 return ih.len;
1219}
1220
fd22d62e
MS
1221static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
1222 struct fuse_copy_state *cs,
02c048b9 1223 size_t nbytes)
fd22d62e 1224__releases(fiq->waitq.lock)
02c048b9 1225{
f88996a9 1226 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
fd22d62e 1227 return fuse_read_single_forget(fiq, cs, nbytes);
02c048b9 1228 else
fd22d62e 1229 return fuse_read_batch_forget(fiq, cs, nbytes);
02c048b9
MS
1230}
1231
334f485d
MS
1232/*
1233 * Read a single request into the userspace filesystem's buffer. This
1234 * function waits until a request is available, then removes it from
1235 * the pending list and copies request data to userspace buffer. If
f9a2842e
MS
1236 * no reply is needed (FORGET) or request has been aborted or there
1237 * was an error during the copying then it's finished by calling
334f485d
MS
1238 * request_end(). Otherwise add it to the processing list, and set
1239 * the 'sent' flag.
1240 */
c3021629
MS
1241static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1242 struct fuse_copy_state *cs, size_t nbytes)
334f485d 1243{
82cbdcd3 1244 ssize_t err;
f88996a9 1245 struct fuse_iqueue *fiq = &fc->iq;
3a2b5b9c 1246 struct fuse_pqueue *fpq = &fc->pq;
334f485d
MS
1247 struct fuse_req *req;
1248 struct fuse_in *in;
334f485d
MS
1249 unsigned reqsize;
1250
1d3d752b 1251 restart:
4ce60812 1252 spin_lock(&fiq->waitq.lock);
e5ac1d1e 1253 err = -EAGAIN;
e16714d8 1254 if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
f88996a9 1255 !request_pending(fiq))
e5ac1d1e
JD
1256 goto err_unlock;
1257
5250921b
MS
1258 err = wait_event_interruptible_exclusive_locked(fiq->waitq,
1259 !fiq->connected || request_pending(fiq));
1260 if (err)
1261 goto err_unlock;
1262
334f485d 1263 err = -ENODEV;
e16714d8 1264 if (!fiq->connected)
334f485d 1265 goto err_unlock;
334f485d 1266
f88996a9
MS
1267 if (!list_empty(&fiq->interrupts)) {
1268 req = list_entry(fiq->interrupts.next, struct fuse_req,
a4d27e75 1269 intr_entry);
fd22d62e 1270 return fuse_read_interrupt(fiq, cs, nbytes, req);
a4d27e75
MS
1271 }
1272
f88996a9
MS
1273 if (forget_pending(fiq)) {
1274 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
fd22d62e 1275 return fuse_read_forget(fc, fiq, cs, nbytes);
07e77dca 1276
f88996a9
MS
1277 if (fiq->forget_batch <= -8)
1278 fiq->forget_batch = 16;
07e77dca
MS
1279 }
1280
f88996a9 1281 req = list_entry(fiq->pending.next, struct fuse_req, list);
33e14b4d 1282 clear_bit(FR_PENDING, &req->flags);
ef759258 1283 list_del_init(&req->list);
4ce60812
MS
1284 spin_unlock(&fiq->waitq.lock);
1285
fd22d62e 1286 spin_lock(&fc->lock);
334f485d 1287 in = &req->in;
1d3d752b
MS
1288 reqsize = in->h.len;
1289 /* If request is too large, reply with an error and restart the read */
c3021629 1290 if (nbytes < reqsize) {
1d3d752b
MS
1291 req->out.h.error = -EIO;
1292 /* SETXATTR is special, since it may contain too large data */
1293 if (in->h.opcode == FUSE_SETXATTR)
1294 req->out.h.error = -E2BIG;
1295 request_end(fc, req);
1296 goto restart;
334f485d 1297 }
45a91cb1 1298 spin_lock(&fpq->lock);
82cbdcd3 1299 list_add(&req->list, &fpq->io);
45a91cb1 1300 spin_unlock(&fpq->lock);
d7133114 1301 spin_unlock(&fc->lock);
c3021629
MS
1302 cs->req = req;
1303 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1d3d752b 1304 if (!err)
c3021629 1305 err = fuse_copy_args(cs, in->numargs, in->argpages,
1d3d752b 1306 (struct fuse_arg *) in->args, 0);
c3021629 1307 fuse_copy_finish(cs);
d7133114 1308 spin_lock(&fc->lock);
45a91cb1 1309 spin_lock(&fpq->lock);
825d6d33 1310 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1311 if (!fpq->connected) {
82cbdcd3
MS
1312 err = -ENODEV;
1313 goto out_end;
c9c9d7df 1314 }
334f485d 1315 if (err) {
c9c9d7df 1316 req->out.h.error = -EIO;
82cbdcd3 1317 goto out_end;
334f485d 1318 }
825d6d33 1319 if (!test_bit(FR_ISREPLY, &req->flags)) {
82cbdcd3
MS
1320 err = reqsize;
1321 goto out_end;
334f485d 1322 }
82cbdcd3 1323 list_move_tail(&req->list, &fpq->processing);
45a91cb1 1324 spin_unlock(&fpq->lock);
82cbdcd3
MS
1325 set_bit(FR_SENT, &req->flags);
1326 /* matches barrier in request_wait_answer() */
1327 smp_mb__after_atomic();
1328 if (test_bit(FR_INTERRUPTED, &req->flags))
1329 queue_interrupt(fiq, req);
1330 spin_unlock(&fc->lock);
1331
334f485d
MS
1332 return reqsize;
1333
82cbdcd3 1334out_end:
77cd9d48
MS
1335 if (!test_bit(FR_PRIVATE, &req->flags))
1336 list_del_init(&req->list);
45a91cb1 1337 spin_unlock(&fpq->lock);
82cbdcd3
MS
1338 request_end(fc, req);
1339 return err;
1340
334f485d 1341 err_unlock:
4ce60812 1342 spin_unlock(&fiq->waitq.lock);
334f485d
MS
1343 return err;
1344}
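/*
 * For context, a hedged sketch of the userspace side of the read path
 * above.  This is not kernel code: a minimal daemon read loop on an
 * already-mounted /dev/fuse descriptor, with error handling trimmed
 * (see libfuse for a real implementation).
 */
#include <unistd.h>
#include <linux/fuse.h>

static void example_daemon_loop(int devfd)
{
	char buf[FUSE_MIN_READ_BUFFER];

	for (;;) {
		ssize_t n = read(devfd, buf, sizeof(buf));
		struct fuse_in_header *in = (struct fuse_in_header *) buf;

		if (n < (ssize_t) sizeof(*in))
			break;		/* e.g. ENODEV once aborted */
		/* dispatch on in->opcode and reply with a
		   fuse_out_header echoing in->unique */
	}
}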
1345
94e4fe2c
TVB
1346static int fuse_dev_open(struct inode *inode, struct file *file)
1347{
1348 /*
1349 * The fuse device's file's private_data is used to hold
1350 * the fuse_conn(ection) when it is mounted, and is used to
1351 * keep track of whether the file has been mounted already.
1352 */
1353 file->private_data = NULL;
1354 return 0;
1355}
1356
fbdbacca 1357static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
c3021629
MS
1358{
1359 struct fuse_copy_state cs;
1360 struct file *file = iocb->ki_filp;
1361 struct fuse_conn *fc = fuse_get_conn(file);
1362 if (!fc)
1363 return -EPERM;
1364
fbdbacca
AV
1365 if (!iter_is_iovec(to))
1366 return -EINVAL;
1367
dc00809a 1368 fuse_copy_init(&cs, 1, to);
c3021629 1369
fbdbacca 1370 return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
c3021629
MS
1371}
1372
c3021629
MS
1373static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1374 struct pipe_inode_info *pipe,
1375 size_t len, unsigned int flags)
1376{
1377 int ret;
1378 int page_nr = 0;
1379 int do_wakeup = 0;
1380 struct pipe_buffer *bufs;
1381 struct fuse_copy_state cs;
1382 struct fuse_conn *fc = fuse_get_conn(in);
1383 if (!fc)
1384 return -EPERM;
1385
07e77dca 1386 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
c3021629
MS
1387 if (!bufs)
1388 return -ENOMEM;
1389
dc00809a 1390 fuse_copy_init(&cs, 1, NULL);
c3021629
MS
1391 cs.pipebufs = bufs;
1392 cs.pipe = pipe;
1393 ret = fuse_dev_do_read(fc, in, &cs, len);
1394 if (ret < 0)
1395 goto out;
1396
1397 ret = 0;
1398 pipe_lock(pipe);
1399
1400 if (!pipe->readers) {
1401 send_sig(SIGPIPE, current, 0);
1402 if (!ret)
1403 ret = -EPIPE;
1404 goto out_unlock;
1405 }
1406
1407 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1408 ret = -EIO;
1409 goto out_unlock;
1410 }
1411
1412 while (page_nr < cs.nr_segs) {
1413 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1414 struct pipe_buffer *buf = pipe->bufs + newbuf;
1415
1416 buf->page = bufs[page_nr].page;
1417 buf->offset = bufs[page_nr].offset;
1418 buf->len = bufs[page_nr].len;
28a625cb
MS
1419 /*
1420 * Need to be careful about this. Having buf->ops in module
1421 * code can Oops if the buffer persists after module unload.
1422 */
1423 buf->ops = &nosteal_pipe_buf_ops;
c3021629
MS
1424
1425 pipe->nrbufs++;
1426 page_nr++;
1427 ret += buf->len;
1428
6447a3cf 1429 if (pipe->files)
c3021629
MS
1430 do_wakeup = 1;
1431 }
1432
1433out_unlock:
1434 pipe_unlock(pipe);
1435
1436 if (do_wakeup) {
1437 smp_mb();
1438 if (waitqueue_active(&pipe->wait))
1439 wake_up_interruptible(&pipe->wait);
1440 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1441 }
1442
1443out:
1444 for (; page_nr < cs.nr_segs; page_nr++)
1445 page_cache_release(bufs[page_nr].page);
1446
1447 kfree(bufs);
1448 return ret;
1449}
1450
95668a69
TH
1451static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1452 struct fuse_copy_state *cs)
1453{
1454 struct fuse_notify_poll_wakeup_out outarg;
f6d47a17 1455 int err = -EINVAL;
95668a69
TH
1456
1457 if (size != sizeof(outarg))
f6d47a17 1458 goto err;
95668a69
TH
1459
1460 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1461 if (err)
f6d47a17 1462 goto err;
95668a69 1463
f6d47a17 1464 fuse_copy_finish(cs);
95668a69 1465 return fuse_notify_poll_wakeup(fc, &outarg);
f6d47a17
MS
1466
1467err:
1468 fuse_copy_finish(cs);
1469 return err;
95668a69
TH
1470}
1471
3b463ae0
JM
1472static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1473 struct fuse_copy_state *cs)
1474{
1475 struct fuse_notify_inval_inode_out outarg;
1476 int err = -EINVAL;
1477
1478 if (size != sizeof(outarg))
1479 goto err;
1480
1481 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1482 if (err)
1483 goto err;
1484 fuse_copy_finish(cs);
1485
1486 down_read(&fc->killsb);
1487 err = -ENOENT;
b21dda43
MS
1488 if (fc->sb) {
1489 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1490 outarg.off, outarg.len);
1491 }
3b463ae0
JM
1492 up_read(&fc->killsb);
1493 return err;
1494
1495err:
1496 fuse_copy_finish(cs);
1497 return err;
1498}
1499
1500static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1501 struct fuse_copy_state *cs)
1502{
1503 struct fuse_notify_inval_entry_out outarg;
b2d82ee3
FW
1504 int err = -ENOMEM;
1505 char *buf;
3b463ae0
JM
1506 struct qstr name;
1507
b2d82ee3
FW
1508 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1509 if (!buf)
1510 goto err;
1511
1512 err = -EINVAL;
3b463ae0
JM
1513 if (size < sizeof(outarg))
1514 goto err;
1515
1516 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1517 if (err)
1518 goto err;
1519
1520 err = -ENAMETOOLONG;
1521 if (outarg.namelen > FUSE_NAME_MAX)
1522 goto err;
1523
c2183d1e
MS
1524 err = -EINVAL;
1525 if (size != sizeof(outarg) + outarg.namelen + 1)
1526 goto err;
1527
3b463ae0
JM
1528 name.name = buf;
1529 name.len = outarg.namelen;
1530 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1531 if (err)
1532 goto err;
1533 fuse_copy_finish(cs);
1534 buf[outarg.namelen] = 0;
1535 name.hash = full_name_hash(name.name, name.len);
1536
1537 down_read(&fc->killsb);
1538 err = -ENOENT;
b21dda43 1539 if (fc->sb)
451d0f59
JM
1540 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1541 up_read(&fc->killsb);
1542 kfree(buf);
1543 return err;
1544
1545err:
1546 kfree(buf);
1547 fuse_copy_finish(cs);
1548 return err;
1549}
1550
1551static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1552 struct fuse_copy_state *cs)
1553{
1554 struct fuse_notify_delete_out outarg;
1555 int err = -ENOMEM;
1556 char *buf;
1557 struct qstr name;
1558
1559 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1560 if (!buf)
1561 goto err;
1562
1563 err = -EINVAL;
1564 if (size < sizeof(outarg))
1565 goto err;
1566
1567 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1568 if (err)
1569 goto err;
1570
1571 err = -ENAMETOOLONG;
1572 if (outarg.namelen > FUSE_NAME_MAX)
1573 goto err;
1574
1575 err = -EINVAL;
1576 if (size != sizeof(outarg) + outarg.namelen + 1)
1577 goto err;
1578
1579 name.name = buf;
1580 name.len = outarg.namelen;
1581 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1582 if (err)
1583 goto err;
1584 fuse_copy_finish(cs);
1585 buf[outarg.namelen] = 0;
1586 name.hash = full_name_hash(name.name, name.len);
1587
1588 down_read(&fc->killsb);
1589 err = -ENOENT;
1590 if (fc->sb)
1591 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1592 outarg.child, &name);
3b463ae0 1593 up_read(&fc->killsb);
b2d82ee3 1594 kfree(buf);
3b463ae0
JM
1595 return err;
1596
1597err:
b2d82ee3 1598 kfree(buf);
3b463ae0
JM
1599 fuse_copy_finish(cs);
1600 return err;
1601}
1602
a1d75f25
MS
1603static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1604 struct fuse_copy_state *cs)
1605{
1606 struct fuse_notify_store_out outarg;
1607 struct inode *inode;
1608 struct address_space *mapping;
1609 u64 nodeid;
1610 int err;
1611 pgoff_t index;
1612 unsigned int offset;
1613 unsigned int num;
1614 loff_t file_size;
1615 loff_t end;
1616
1617 err = -EINVAL;
1618 if (size < sizeof(outarg))
1619 goto out_finish;
1620
1621 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1622 if (err)
1623 goto out_finish;
1624
1625 err = -EINVAL;
1626 if (size - sizeof(outarg) != outarg.size)
1627 goto out_finish;
1628
1629 nodeid = outarg.nodeid;
1630
1631 down_read(&fc->killsb);
1632
1633 err = -ENOENT;
1634 if (!fc->sb)
1635 goto out_up_killsb;
1636
1637 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1638 if (!inode)
1639 goto out_up_killsb;
1640
1641 mapping = inode->i_mapping;
1642 index = outarg.offset >> PAGE_CACHE_SHIFT;
1643 offset = outarg.offset & ~PAGE_CACHE_MASK;
1644 file_size = i_size_read(inode);
1645 end = outarg.offset + outarg.size;
1646 if (end > file_size) {
1647 file_size = end;
1648 fuse_write_update_size(inode, file_size);
1649 }
1650
1651 num = outarg.size;
1652 while (num) {
1653 struct page *page;
1654 unsigned int this_num;
1655
1656 err = -ENOMEM;
1657 page = find_or_create_page(mapping, index,
1658 mapping_gfp_mask(mapping));
1659 if (!page)
1660 goto out_iput;
1661
1662 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1663 err = fuse_copy_page(cs, &page, offset, this_num, 0);
063ec1e5
MS
1664 if (!err && offset == 0 &&
1665 (this_num == PAGE_CACHE_SIZE || file_size == end))
a1d75f25
MS
1666 SetPageUptodate(page);
1667 unlock_page(page);
1668 page_cache_release(page);
1669
1670 if (err)
1671 goto out_iput;
1672
1673 num -= this_num;
1674 offset = 0;
1675 index++;
1676 }
1677
1678 err = 0;
1679
1680out_iput:
1681 iput(inode);
1682out_up_killsb:
1683 up_read(&fc->killsb);
1684out_finish:
1685 fuse_copy_finish(cs);
1686 return err;
1687}
1688
2d45ba38
MS
1689static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1690{
b745bc85 1691 release_pages(req->pages, req->num_pages, false);
2d45ba38
MS
1692}
1693
1694static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1695 struct fuse_notify_retrieve_out *outarg)
1696{
1697 int err;
1698 struct address_space *mapping = inode->i_mapping;
1699 struct fuse_req *req;
1700 pgoff_t index;
1701 loff_t file_size;
1702 unsigned int num;
1703 unsigned int offset;
0157443c 1704 size_t total_len = 0;
4d53dc99 1705 int num_pages;
2d45ba38 1706
4d53dc99
MP
1707 offset = outarg->offset & ~PAGE_CACHE_MASK;
1708 file_size = i_size_read(inode);
1709
1710 num = outarg->size;
1711 if (outarg->offset > file_size)
1712 num = 0;
1713 else if (outarg->offset + num > file_size)
1714 num = file_size - outarg->offset;
1715
1716 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1717 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1718
1719 req = fuse_get_req(fc, num_pages);
2d45ba38
MS
1720 if (IS_ERR(req))
1721 return PTR_ERR(req);
1722
2d45ba38
MS
1723 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1724 req->in.h.nodeid = outarg->nodeid;
1725 req->in.numargs = 2;
1726 req->in.argpages = 1;
b2430d75 1727 req->page_descs[0].offset = offset;
2d45ba38
MS
1728 req->end = fuse_retrieve_end;
1729
1730 index = outarg->offset >> PAGE_CACHE_SHIFT;
2d45ba38 1731
4d53dc99 1732 while (num && req->num_pages < num_pages) {
2d45ba38
MS
1733 struct page *page;
1734 unsigned int this_num;
1735
1736 page = find_get_page(mapping, index);
1737 if (!page)
1738 break;
1739
1740 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1741 req->pages[req->num_pages] = page;
85f40aec 1742 req->page_descs[req->num_pages].length = this_num;
2d45ba38
MS
1743 req->num_pages++;
1744
c9e67d48 1745 offset = 0;
2d45ba38
MS
1746 num -= this_num;
1747 total_len += this_num;
48706d0a 1748 index++;
2d45ba38
MS
1749 }
1750 req->misc.retrieve_in.offset = outarg->offset;
1751 req->misc.retrieve_in.size = total_len;
1752 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1753 req->in.args[0].value = &req->misc.retrieve_in;
1754 req->in.args[1].size = total_len;
1755
1756 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1757 if (err)
1758 fuse_retrieve_end(fc, req);
1759
1760 return err;
1761}
1762
1763static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1764 struct fuse_copy_state *cs)
1765{
1766 struct fuse_notify_retrieve_out outarg;
1767 struct inode *inode;
1768 int err;
1769
1770 err = -EINVAL;
1771 if (size != sizeof(outarg))
1772 goto copy_finish;
1773
1774 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1775 if (err)
1776 goto copy_finish;
1777
1778 fuse_copy_finish(cs);
1779
1780 down_read(&fc->killsb);
1781 err = -ENOENT;
1782 if (fc->sb) {
1783 u64 nodeid = outarg.nodeid;
1784
1785 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1786 if (inode) {
1787 err = fuse_retrieve(fc, inode, &outarg);
1788 iput(inode);
1789 }
1790 }
1791 up_read(&fc->killsb);
1792
1793 return err;
1794
1795copy_finish:
1796 fuse_copy_finish(cs);
1797 return err;
1798}
1799
8599396b
TH
1800static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1801 unsigned int size, struct fuse_copy_state *cs)
1802{
0d278362
MS
1803 /* Don't try to move pages (yet) */
1804 cs->move_pages = 0;
1805
8599396b 1806 switch (code) {
95668a69
TH
1807 case FUSE_NOTIFY_POLL:
1808 return fuse_notify_poll(fc, size, cs);
1809
3b463ae0
JM
1810 case FUSE_NOTIFY_INVAL_INODE:
1811 return fuse_notify_inval_inode(fc, size, cs);
1812
1813 case FUSE_NOTIFY_INVAL_ENTRY:
1814 return fuse_notify_inval_entry(fc, size, cs);
1815
a1d75f25
MS
1816 case FUSE_NOTIFY_STORE:
1817 return fuse_notify_store(fc, size, cs);
1818
2d45ba38
MS
1819 case FUSE_NOTIFY_RETRIEVE:
1820 return fuse_notify_retrieve(fc, size, cs);
1821
451d0f59
JM
1822 case FUSE_NOTIFY_DELETE:
1823 return fuse_notify_delete(fc, size, cs);
1824
8599396b 1825 default:
f6d47a17 1826 fuse_copy_finish(cs);
8599396b
TH
1827 return -EINVAL;
1828 }
1829}
1830
334f485d 1831/* Look up request on processing list by unique ID */
3a2b5b9c 1832static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
334f485d 1833{
05726aca 1834 struct fuse_req *req;
334f485d 1835
3a2b5b9c 1836 list_for_each_entry(req, &fpq->processing, list) {
a4d27e75 1837 if (req->in.h.unique == unique || req->intr_unique == unique)
334f485d
MS
1838 return req;
1839 }
1840 return NULL;
1841}
1842
1843static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1844 unsigned nbytes)
1845{
1846 unsigned reqsize = sizeof(struct fuse_out_header);
1847
1848 if (out->h.error)
1849 return nbytes != reqsize ? -EINVAL : 0;
1850
1851 reqsize += len_args(out->numargs, out->args);
1852
1853 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1854 return -EINVAL;
1855 else if (reqsize > nbytes) {
1856 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1857 unsigned diffsize = reqsize - nbytes;
1858 if (diffsize > lastarg->size)
1859 return -EINVAL;
1860 lastarg->size -= diffsize;
1861 }
1862 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1863 out->page_zeroing);
1864}
1865
1866/*
1867 * Write a single reply to a request. First the header is copied from
1868 * the write buffer. The request is then searched on the processing
1869 * list by the unique ID found in the header. If found, then remove
1870 * it from the list and copy the rest of the buffer to the request.
1871 * The request is finished by calling request_end()
1872 */
dd3bb14f
MS
1873static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1874 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1875{
1876 int err;
3a2b5b9c 1877 struct fuse_pqueue *fpq = &fc->pq;
334f485d
MS
1878 struct fuse_req *req;
1879 struct fuse_out_header oh;
334f485d 1880
334f485d
MS
1881 if (nbytes < sizeof(struct fuse_out_header))
1882 return -EINVAL;
1883
dd3bb14f 1884 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1885 if (err)
1886 goto err_finish;
8599396b
TH
1887
1888 err = -EINVAL;
1889 if (oh.len != nbytes)
1890 goto err_finish;
1891
1892 /*
1893 * Zero oh.unique indicates unsolicited notification message
1894 * and error contains notification code.
1895 */
1896 if (!oh.unique) {
dd3bb14f 1897 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1898 return err ? err : nbytes;
1899 }
1900
334f485d 1901 err = -EINVAL;
8599396b 1902 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1903 goto err_finish;
1904
d7133114 1905 spin_lock(&fc->lock);
45a91cb1 1906 spin_lock(&fpq->lock);
69a53bf2 1907 err = -ENOENT;
e96edd94 1908 if (!fpq->connected)
45a91cb1 1909 goto err_unlock_pq;
69a53bf2 1910
3a2b5b9c 1911 req = request_find(fpq, oh.unique);
334f485d 1912 if (!req)
45a91cb1 1913 goto err_unlock_pq;
334f485d 1914
a4d27e75
MS
1915 /* Is it an interrupt reply? */
1916 if (req->intr_unique == oh.unique) {
45a91cb1
MS
1917 spin_unlock(&fpq->lock);
1918
a4d27e75
MS
1919 err = -EINVAL;
1920 if (nbytes != sizeof(struct fuse_out_header))
1921 goto err_unlock;
1922
1923 if (oh.error == -ENOSYS)
1924 fc->no_interrupt = 1;
1925 else if (oh.error == -EAGAIN)
f88996a9 1926 queue_interrupt(&fc->iq, req);
a4d27e75
MS
1927
1928 spin_unlock(&fc->lock);
dd3bb14f 1929 fuse_copy_finish(cs);
a4d27e75
MS
1930 return nbytes;
1931 }
1932
33e14b4d 1933 clear_bit(FR_SENT, &req->flags);
3a2b5b9c 1934 list_move(&req->list, &fpq->io);
334f485d 1935 req->out.h = oh;
825d6d33 1936 set_bit(FR_LOCKED, &req->flags);
45a91cb1 1937 spin_unlock(&fpq->lock);
dd3bb14f 1938 cs->req = req;
ce534fb0
MS
1939 if (!req->out.page_replace)
1940 cs->move_pages = 0;
d7133114 1941 spin_unlock(&fc->lock);
334f485d 1942
dd3bb14f
MS
1943 err = copy_out_args(cs, &req->out, nbytes);
1944 fuse_copy_finish(cs);
334f485d 1945
d7133114 1946 spin_lock(&fc->lock);
45a91cb1 1947 spin_lock(&fpq->lock);
825d6d33 1948 clear_bit(FR_LOCKED, &req->flags);
e96edd94 1949 if (!fpq->connected)
0d8e84b0
MS
1950 err = -ENOENT;
1951 else if (err)
334f485d 1952 req->out.h.error = -EIO;
77cd9d48
MS
1953 if (!test_bit(FR_PRIVATE, &req->flags))
1954 list_del_init(&req->list);
45a91cb1 1955 spin_unlock(&fpq->lock);
334f485d
MS
1956 request_end(fc, req);
1957
1958 return err ? err : nbytes;
1959
45a91cb1
MS
1960 err_unlock_pq:
1961 spin_unlock(&fpq->lock);
334f485d 1962 err_unlock:
d7133114 1963 spin_unlock(&fc->lock);
334f485d 1964 err_finish:
dd3bb14f 1965 fuse_copy_finish(cs);
334f485d
MS
1966 return err;
1967}
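
/*
 * Illustrative daemon-side counterpart (not part of dev.c): the
 * simplest well-formed reply is an error reply, which is just the bare
 * header.  "unique" must echo the fuse_in_header of the request being
 * answered, and oh.error must be 0 or a negated errno above -1000, as
 * checked above.  "fuse_fd" is assumed to be the open /dev/fuse
 * descriptor.
 */
#include <errno.h>
#include <linux/fuse.h>
#include <unistd.h>

static int reply_err(int fuse_fd, uint64_t unique, int err)
{
        struct fuse_out_header oh = {
                .len = sizeof(oh),      /* header only, no argument payload */
                .error = -err,          /* e.g. -ENOENT for err == ENOENT */
                .unique = unique,       /* pairs the reply with its request */
        };

        return write(fuse_fd, &oh, sizeof(oh)) == sizeof(oh) ? 0 : -1;
}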
1968
fbdbacca 1969static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
dd3bb14f
MS
1970{
1971 struct fuse_copy_state cs;
1972 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1973 if (!fc)
1974 return -EPERM;
1975
fbdbacca
AV
1976 if (!iter_is_iovec(from))
1977 return -EINVAL;
1978
dc00809a 1979 fuse_copy_init(&cs, 0, from);
dd3bb14f 1980
fbdbacca 1981 return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
dd3bb14f
MS
1982}
1983
1984static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1985 struct file *out, loff_t *ppos,
1986 size_t len, unsigned int flags)
1987{
1988 unsigned nbuf;
1989 unsigned idx;
1990 struct pipe_buffer *bufs;
1991 struct fuse_copy_state cs;
1992 struct fuse_conn *fc;
1993 size_t rem;
1994 ssize_t ret;
1995
1996 fc = fuse_get_conn(out);
1997 if (!fc)
1998 return -EPERM;
1999
07e77dca 2000 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
2001 if (!bufs)
2002 return -ENOMEM;
2003
2004 pipe_lock(pipe);
2005 nbuf = 0;
2006 rem = 0;
2007 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
2008 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
2009
2010 ret = -EINVAL;
2011 if (rem < len) {
2012 pipe_unlock(pipe);
2013 goto out;
2014 }
2015
2016 rem = len;
2017 while (rem) {
2018 struct pipe_buffer *ibuf;
2019 struct pipe_buffer *obuf;
2020
2021 BUG_ON(nbuf >= pipe->buffers);
2022 BUG_ON(!pipe->nrbufs);
2023 ibuf = &pipe->bufs[pipe->curbuf];
2024 obuf = &bufs[nbuf];
2025
2026 if (rem >= ibuf->len) {
2027 *obuf = *ibuf;
2028 ibuf->ops = NULL;
2029 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2030 pipe->nrbufs--;
2031 } else {
2032 ibuf->ops->get(pipe, ibuf);
2033 *obuf = *ibuf;
2034 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2035 obuf->len = rem;
2036 ibuf->offset += obuf->len;
2037 ibuf->len -= obuf->len;
2038 }
2039 nbuf++;
2040 rem -= obuf->len;
2041 }
2042 pipe_unlock(pipe);
2043
dc00809a 2044 fuse_copy_init(&cs, 0, NULL);
dd3bb14f 2045 cs.pipebufs = bufs;
6c09e94a 2046 cs.nr_segs = nbuf;
dd3bb14f
MS
2047 cs.pipe = pipe;
2048
ce534fb0
MS
2049 if (flags & SPLICE_F_MOVE)
2050 cs.move_pages = 1;
2051
dd3bb14f
MS
2052 ret = fuse_dev_do_write(fc, &cs, len);
2053
2054 for (idx = 0; idx < nbuf; idx++) {
2055 struct pipe_buffer *buf = &bufs[idx];
2056 buf->ops->release(pipe, buf);
2057 }
2058out:
2059 kfree(bufs);
2060 return ret;
2061}
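
/*
 * Illustrative userspace use of this splice path (not part of dev.c),
 * assuming the daemon has already placed one complete reply -- header
 * first, oh.len bytes in total -- into the pipe read end "pipe_rd"
 * (e.g. via vmsplice()).  Passing SPLICE_F_MOVE sets cs.move_pages
 * above, allowing full pages to be stolen rather than copied.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_reply(int pipe_rd, int fuse_fd, size_t reply_len)
{
        /* offsets must be NULL: neither end is seekable */
        return splice(pipe_rd, NULL, fuse_fd, NULL, reply_len,
                      SPLICE_F_MOVE);
}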
2062
334f485d
MS
2063static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2064{
334f485d 2065 unsigned mask = POLLOUT | POLLWRNORM;
f88996a9 2066 struct fuse_iqueue *fiq;
7025d9ad 2067 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2068 if (!fc)
7025d9ad 2069 return POLLERR;
334f485d 2070
f88996a9
MS
2071 fiq = &fc->iq;
2072 poll_wait(file, &fiq->waitq, wait);
334f485d 2073
4ce60812 2074 spin_lock(&fiq->waitq.lock);
e16714d8 2075 if (!fiq->connected)
7025d9ad 2076 mask = POLLERR;
f88996a9 2077 else if (request_pending(fiq))
7025d9ad 2078 mask |= POLLIN | POLLRDNORM;
4ce60812 2079 spin_unlock(&fiq->waitq.lock);
334f485d
MS
2080
2081 return mask;
2082}
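
/*
 * Illustrative event loop (not part of dev.c) built on the semantics
 * above: /dev/fuse is always writable, so only POLLIN matters, and
 * POLLERR signals an aborted or disconnected connection.
 * handle_one_request() is a hypothetical helper that reads and answers
 * a single request.
 */
#include <poll.h>

void handle_one_request(int fuse_fd);   /* hypothetical, defined elsewhere */

static void daemon_loop(int fuse_fd)
{
        struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

        for (;;) {
                if (poll(&pfd, 1, -1) < 0)
                        continue;       /* e.g. EINTR */
                if (pfd.revents & POLLERR)
                        break;          /* connection gone */
                if (pfd.revents & POLLIN)
                        handle_one_request(fuse_fd);
        }
}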
2083
69a53bf2
MS
2084/*
2085 * Abort all requests on the given list (pending or processing)
2086 *
d7133114 2087 * This function releases and reacquires fc->lock
69a53bf2 2088 */
334f485d 2089static void end_requests(struct fuse_conn *fc, struct list_head *head)
b9ca67b2
MS
2090__releases(fc->lock)
2091__acquires(fc->lock)
334f485d
MS
2092{
2093 while (!list_empty(head)) {
2094 struct fuse_req *req;
2095 req = list_entry(head->next, struct fuse_req, list);
334f485d 2096 req->out.h.error = -ECONNABORTED;
33e14b4d
MS
2097 clear_bit(FR_PENDING, &req->flags);
2098 clear_bit(FR_SENT, &req->flags);
f377cb79 2099 list_del_init(&req->list);
334f485d 2100 request_end(fc, req);
d7133114 2101 spin_lock(&fc->lock);
334f485d
MS
2102 }
2103}
2104
357ccf2b
BG
2105static void end_polls(struct fuse_conn *fc)
2106{
2107 struct rb_node *p;
2108
2109 p = rb_first(&fc->polled_files);
2110
2111 while (p) {
2112 struct fuse_file *ff;
2113 ff = rb_entry(p, struct fuse_file, polled_node);
2114 wake_up_interruptible_all(&ff->poll_wait);
2115
2116 p = rb_next(p);
2117 }
2118}
2119
69a53bf2
MS
2120/*
2121 * Abort all requests.
2122 *
b716d425
MS
2123 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2124 * filesystem.
2125 *
2126 * The same effect is usually achievable through killing the filesystem daemon
2127 * and all users of the filesystem. The exception is the combination of an
2128 * asynchronous request and the tricky deadlock (see
2129 * Documentation/filesystems/fuse.txt).
69a53bf2 2130 *
b716d425
MS
 2131 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 2132 * requests; these should be finished off immediately. Locked requests
 2133 * will be finished after unlock; see unlock_request(). 2: Finish off the
 2134 * unlocked requests. It is possible that some request will finish before
 2135 * we can; that is OK, since such a request is removed from the list
 2136 * before we touch it.
69a53bf2
MS
2137 */
2138void fuse_abort_conn(struct fuse_conn *fc)
2139{
f88996a9 2140 struct fuse_iqueue *fiq = &fc->iq;
3a2b5b9c 2141 struct fuse_pqueue *fpq = &fc->pq;
f88996a9 2142
d7133114 2143 spin_lock(&fc->lock);
69a53bf2 2144 if (fc->connected) {
b716d425 2145 struct fuse_req *req, *next;
41f98274
MS
2146 LIST_HEAD(to_end1);
2147 LIST_HEAD(to_end2);
b716d425 2148
69a53bf2 2149 fc->connected = 0;
51eb01e7 2150 fc->blocked = 0;
9759bd51 2151 fuse_set_initialized(fc);
45a91cb1 2152 spin_lock(&fpq->lock);
e96edd94 2153 fpq->connected = 0;
3a2b5b9c 2154 list_for_each_entry_safe(req, next, &fpq->io, list) {
b716d425
MS
2155 req->out.h.error = -ECONNABORTED;
2156 spin_lock(&req->waitq.lock);
2157 set_bit(FR_ABORTED, &req->flags);
77cd9d48
MS
2158 if (!test_bit(FR_LOCKED, &req->flags)) {
2159 set_bit(FR_PRIVATE, &req->flags);
41f98274 2160 list_move(&req->list, &to_end1);
77cd9d48 2161 }
b716d425
MS
2162 spin_unlock(&req->waitq.lock);
2163 }
24b4d33d 2164 list_splice_init(&fpq->processing, &to_end2);
45a91cb1 2165 spin_unlock(&fpq->lock);
41f98274
MS
2166 fc->max_background = UINT_MAX;
2167 flush_bg_queue(fc);
8c91189a 2168
4ce60812 2169 spin_lock(&fiq->waitq.lock);
8c91189a 2170 fiq->connected = 0;
f88996a9 2171 list_splice_init(&fiq->pending, &to_end2);
8c91189a
MS
2172 while (forget_pending(fiq))
2173 kfree(dequeue_forget(fiq, 1, NULL));
4ce60812
MS
2174 wake_up_all_locked(&fiq->waitq);
2175 spin_unlock(&fiq->waitq.lock);
8c91189a
MS
2176 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2177
41f98274
MS
2178 while (!list_empty(&to_end1)) {
2179 req = list_first_entry(&to_end1, struct fuse_req, list);
b716d425 2180 __fuse_get_request(req);
f377cb79 2181 list_del_init(&req->list);
b716d425
MS
2182 request_end(fc, req);
2183 spin_lock(&fc->lock);
2184 }
41f98274 2185 end_requests(fc, &to_end2);
357ccf2b 2186 end_polls(fc);
51eb01e7 2187 wake_up_all(&fc->blocked_waitq);
69a53bf2 2188 }
d7133114 2189 spin_unlock(&fc->lock);
69a53bf2 2190}
08cbf542 2191EXPORT_SYMBOL_GPL(fuse_abort_conn);
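
/*
 * Illustrative administrative trigger (not part of dev.c): besides
 * killing the daemon, fuse_abort_conn() can be reached from userspace
 * through the connection's sysfs abort file, as described in
 * Documentation/filesystems/fuse.txt.  The path below is a placeholder;
 * the real one is keyed by the connection's device number.
 */
#include <fcntl.h>
#include <unistd.h>

static void abort_fuse_conn(const char *abort_path)
{
        /* e.g. "/sys/fs/fuse/connections/20/abort" */
        int fd = open(abort_path, O_WRONLY);

        if (fd >= 0) {
                (void)write(fd, "1", 1);        /* any write triggers abort */
                close(fd);
        }
}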
69a53bf2 2192
08cbf542 2193int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2194{
0720b315 2195 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2196 if (fc) {
3a2b5b9c 2197 WARN_ON(!list_empty(&fc->pq.io));
f88996a9 2198 WARN_ON(fc->iq.fasync != NULL);
ccd0a0bd 2199 fuse_abort_conn(fc);
bafa9654 2200 fuse_conn_put(fc);
385a17bf 2201 }
f543f253 2202
334f485d
MS
2203 return 0;
2204}
08cbf542 2205EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2206
385a17bf
JD
2207static int fuse_dev_fasync(int fd, struct file *file, int on)
2208{
2209 struct fuse_conn *fc = fuse_get_conn(file);
2210 if (!fc)
a87046d8 2211 return -EPERM;
385a17bf
JD
2212
2213 /* No locking - fasync_helper does its own locking */
f88996a9 2214 return fasync_helper(fd, file, on, &fc->iq.fasync);
385a17bf
JD
2215}
2216
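
/*
 * Illustrative userspace counterpart (not part of dev.c): opting in to
 * SIGIO so that fasync_helper() above arms fc->iq.fasync and the
 * kill_fasync() call in the request-queueing path wakes the daemon.
 * Standard fcntl() usage; nothing here is FUSE-specific.
 */
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int fuse_fd)
{
        int flags;

        if (fcntl(fuse_fd, F_SETOWN, getpid()) < 0)
                return -1;

        flags = fcntl(fuse_fd, F_GETFL);
        if (flags < 0)
                return -1;

        /* O_ASYNC makes the kernel call fuse_dev_fasync(..., on = 1) */
        return fcntl(fuse_fd, F_SETFL, flags | O_ASYNC);
}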
4b6f5d20 2217const struct file_operations fuse_dev_operations = {
334f485d 2218 .owner = THIS_MODULE,
94e4fe2c 2219 .open = fuse_dev_open,
334f485d 2220 .llseek = no_llseek,
fbdbacca 2221 .read_iter = fuse_dev_read,
c3021629 2222 .splice_read = fuse_dev_splice_read,
fbdbacca 2223 .write_iter = fuse_dev_write,
dd3bb14f 2224 .splice_write = fuse_dev_splice_write,
334f485d
MS
2225 .poll = fuse_dev_poll,
2226 .release = fuse_dev_release,
385a17bf 2227 .fasync = fuse_dev_fasync,
334f485d 2228};
08cbf542 2229EXPORT_SYMBOL_GPL(fuse_dev_operations);
334f485d
MS
2230
2231static struct miscdevice fuse_miscdevice = {
2232 .minor = FUSE_MINOR,
2233 .name = "fuse",
2234 .fops = &fuse_dev_operations,
2235};
2236
2237int __init fuse_dev_init(void)
2238{
2239 int err = -ENOMEM;
2240 fuse_req_cachep = kmem_cache_create("fuse_request",
2241 sizeof(struct fuse_req),
20c2df83 2242 0, 0, NULL);
334f485d
MS
2243 if (!fuse_req_cachep)
2244 goto out;
2245
2246 err = misc_register(&fuse_miscdevice);
2247 if (err)
2248 goto out_cache_clean;
2249
2250 return 0;
2251
2252 out_cache_clean:
2253 kmem_cache_destroy(fuse_req_cachep);
2254 out:
2255 return err;
2256}
2257
2258void fuse_dev_cleanup(void)
2259{
2260 misc_deregister(&fuse_miscdevice);
2261 kmem_cache_destroy(fuse_req_cachep);
2262}