/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <[email protected]>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

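/* Fill in the uid, gid and pid of the process initiating the request */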
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

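/*
 * Allocate a request and fill in the caller's credentials.  Waits
 * until the connection is unblocked; fails with -EINTR on a fatal
 * signal, -ENOTCONN if the connection is gone and -ENOMEM if the
 * allocation fails.
 */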
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

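/* Total length in bytes of an argument array */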
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

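/* Hand out the next unique request ID; zero is reserved and skipped */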
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

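/* Add a request to the pending list and wake up the device reader */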
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->forget_list_tail->next = forget;
		fc->forget_list_tail = forget;
		wake_up(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fc->lock);
}

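/*
 * Start queued background requests, as long as fewer than
 * max_background of them are active.
 */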
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

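/*
 * Wait for the request to be answered.  Waits interruptibly first
 * (queueing an INTERRUPT if a signal arrives after the request was
 * sent), then with only fatal signals allowed, and finally
 * uninterruptibly if the request was forced or is already in
 * userspace.
 */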
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

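/*
 * State for copying data between a request and a userspace buffer or
 * a pipe (for splice support).
 */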
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

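/*
 * Refuse to steal a page that is mapped, has extra references or
 * carries unexpected flags.
 */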
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

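/*
 * Try to steal the page backing the current pipe buffer and install
 * it in the page cache in place of the request's page, avoiding a
 * copy.  Returns 0 on success, 1 to fall back to copying, or a
 * negative error.
 */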
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

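/*
 * Reference a request page from a pipe buffer, so that a splice read
 * can hand the page to the pipe without copying.
 */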
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER0);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER0);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

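/* Are any FORGET requests queued? */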
static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

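/*
 * Detach up to 'max' queued forgets from the list and return them;
 * the number actually dequeued is stored in *countp if it is
 * non-NULL.
 */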
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

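/*
 * Pipe buffer operations for buffers handed out by a splice read of
 * the device.  The steal callback always refuses, so these pages
 * cannot be stolen from the pipe.
 */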
static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

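/* Handle a FUSE_NOTIFY_POLL message: wake up the matching poll waiters */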
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 && (num != 0 || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

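/* Drop the page references taken by fuse_retrieve() */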
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	offset = outarg->offset & ~PAGE_CACHE_MASK;

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;
	file_size = i_size_read(inode);
	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	while (num) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->num_pages++;

		num -= this_num;
		total_len += this_num;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

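/*
 * Copy the reply arguments from the userspace buffer into the
 * request, shrinking the last (variable length) argument if the
 * reply is shorter than the maximum.
 */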
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

1642 | /* | |
1643 | * Write a single reply to a request. First the header is copied from | |
1644 | * the write buffer. The request is then searched on the processing | |
1645 | * list by the unique ID found in the header. If found, then remove | |
1646 | * it from the list and copy the rest of the buffer to the request. | |
1647 | * The request is finished by calling request_end() | |
1648 | */ | |
dd3bb14f MS |
1649 | static ssize_t fuse_dev_do_write(struct fuse_conn *fc, |
1650 | struct fuse_copy_state *cs, size_t nbytes) | |
334f485d MS |
1651 | { |
1652 | int err; | |
334f485d MS |
1653 | struct fuse_req *req; |
1654 | struct fuse_out_header oh; | |
334f485d | 1655 | |
334f485d MS |
1656 | if (nbytes < sizeof(struct fuse_out_header)) |
1657 | return -EINVAL; | |
1658 | ||
dd3bb14f | 1659 | err = fuse_copy_one(cs, &oh, sizeof(oh)); |
334f485d MS |
1660 | if (err) |
1661 | goto err_finish; | |
8599396b TH |
1662 | |
1663 | err = -EINVAL; | |
1664 | if (oh.len != nbytes) | |
1665 | goto err_finish; | |
1666 | ||
1667 | /* | |
1668 | * Zero oh.unique indicates unsolicited notification message | |
1669 | * and error contains notification code. | |
1670 | */ | |
1671 | if (!oh.unique) { | |
dd3bb14f | 1672 | err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); |
8599396b TH |
1673 | return err ? err : nbytes; |
1674 | } | |
1675 | ||
334f485d | 1676 | err = -EINVAL; |
8599396b | 1677 | if (oh.error <= -1000 || oh.error > 0) |
334f485d MS |
1678 | goto err_finish; |
1679 | ||
d7133114 | 1680 | spin_lock(&fc->lock); |
69a53bf2 MS |
1681 | err = -ENOENT; |
1682 | if (!fc->connected) | |
1683 | goto err_unlock; | |
1684 | ||
334f485d | 1685 | req = request_find(fc, oh.unique); |
334f485d MS |
1686 | if (!req) |
1687 | goto err_unlock; | |
1688 | ||
f9a2842e | 1689 | if (req->aborted) { |
d7133114 | 1690 | spin_unlock(&fc->lock); |
dd3bb14f | 1691 | fuse_copy_finish(cs); |
d7133114 | 1692 | spin_lock(&fc->lock); |
222f1d69 | 1693 | request_end(fc, req); |
334f485d MS |
1694 | return -ENOENT; |
1695 | } | |
a4d27e75 MS |
1696 | /* Is it an interrupt reply? */ |
1697 | if (req->intr_unique == oh.unique) { | |
1698 | err = -EINVAL; | |
1699 | if (nbytes != sizeof(struct fuse_out_header)) | |
1700 | goto err_unlock; | |
1701 | ||
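		/*
		 * -ENOSYS: the daemon does not implement FUSE_INTERRUPT, so
		 * stop sending interrupts on this connection.  -EAGAIN: the
		 * daemon has not seen the interrupted request yet, so queue
		 * the interrupt again to be retried later.
		 */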
1702 | if (oh.error == -ENOSYS) | |
1703 | fc->no_interrupt = 1; | |
1704 | else if (oh.error == -EAGAIN) | |
1705 | queue_interrupt(fc, req); | |
1706 | ||
1707 | spin_unlock(&fc->lock); | |
dd3bb14f | 1708 | fuse_copy_finish(cs); |
a4d27e75 MS |
1709 | return nbytes; |
1710 | } | |
1711 | ||
1712 | req->state = FUSE_REQ_WRITING; | |
d77a1d5b | 1713 | list_move(&req->list, &fc->io); |
334f485d MS |
1714 | req->out.h = oh; |
1715 | req->locked = 1; | |
dd3bb14f | 1716 | cs->req = req; |
ce534fb0 MS |
1717 | if (!req->out.page_replace) |
1718 | cs->move_pages = 0; | |
d7133114 | 1719 | spin_unlock(&fc->lock); |
334f485d | 1720 | |
dd3bb14f MS |
1721 | err = copy_out_args(cs, &req->out, nbytes); |
1722 | fuse_copy_finish(cs); | |
334f485d | 1723 | |
d7133114 | 1724 | spin_lock(&fc->lock); |
334f485d MS |
1725 | req->locked = 0; |
1726 | if (!err) { | |
f9a2842e | 1727 | if (req->aborted) |
334f485d | 1728 | err = -ENOENT; |
f9a2842e | 1729 | } else if (!req->aborted) |
334f485d MS |
1730 | req->out.h.error = -EIO; |
1731 | request_end(fc, req); | |
1732 | ||
1733 | return err ? err : nbytes; | |
1734 | ||
1735 | err_unlock: | |
d7133114 | 1736 | spin_unlock(&fc->lock); |
334f485d | 1737 | err_finish: |
dd3bb14f | 1738 | fuse_copy_finish(cs); |
334f485d MS |
1739 | return err; |
1740 | } | |
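
For comparison, a normal reply from the daemon is just the header carrying the request's unique ID and an error of zero (or a negative errno greater than -1000), followed by the out arguments, written in a single write whose size matches oh.len. A minimal sketch, with a hypothetical helper and a GETATTR-style payload chosen purely for illustration:

#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int reply_getattr(int devfd, uint64_t unique,
			 const struct fuse_attr_out *arg)
{
	struct fuse_out_header oh;
	char buf[sizeof(oh) + sizeof(*arg)];

	oh.len = sizeof(buf);	/* must equal the number of bytes written */
	oh.error = 0;		/* or a negative errno, > -1000 */
	oh.unique = unique;	/* copied from the request's fuse_in_header */

	memcpy(buf, &oh, sizeof(oh));
	memcpy(buf + sizeof(oh), arg, sizeof(*arg));
	return write(devfd, buf, sizeof(buf)) == sizeof(buf) ? 0 : -1;
}
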
1741 | ||
dd3bb14f MS |
1742 | static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, |
1743 | unsigned long nr_segs, loff_t pos) | |
1744 | { | |
1745 | struct fuse_copy_state cs; | |
1746 | struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); | |
1747 | if (!fc) | |
1748 | return -EPERM; | |
1749 | ||
c3021629 | 1750 | fuse_copy_init(&cs, fc, 0, iov, nr_segs); |
dd3bb14f MS |
1751 | |
1752 | return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs)); | |
1753 | } | |
1754 | ||
1755 | static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |
1756 | struct file *out, loff_t *ppos, | |
1757 | size_t len, unsigned int flags) | |
1758 | { | |
1759 | unsigned nbuf; | |
1760 | unsigned idx; | |
1761 | struct pipe_buffer *bufs; | |
1762 | struct fuse_copy_state cs; | |
1763 | struct fuse_conn *fc; | |
1764 | size_t rem; | |
1765 | ssize_t ret; | |
1766 | ||
1767 | fc = fuse_get_conn(out); | |
1768 | if (!fc) | |
1769 | return -EPERM; | |
1770 | ||
07e77dca | 1771 | bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); |
dd3bb14f MS |
1772 | if (!bufs) |
1773 | return -ENOMEM; | |
1774 | ||
1775 | pipe_lock(pipe); | |
1776 | nbuf = 0; | |
1777 | rem = 0; | |
1778 | for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) | |
1779 | rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; | |
1780 | ||
1781 | ret = -EINVAL; | |
1782 | if (rem < len) { | |
1783 | pipe_unlock(pipe); | |
1784 | goto out; | |
1785 | } | |
1786 | ||
1787 | rem = len; | |
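	/*
	 * Steal enough pipe buffers to cover 'len' bytes of reply data:
	 * buffers consumed completely are moved off the pipe, while a
	 * partially consumed buffer gets an extra reference and is split,
	 * leaving the remainder on the pipe.
	 */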
1788 | while (rem) { | |
1789 | struct pipe_buffer *ibuf; | |
1790 | struct pipe_buffer *obuf; | |
1791 | ||
1792 | BUG_ON(nbuf >= pipe->buffers); | |
1793 | BUG_ON(!pipe->nrbufs); | |
1794 | ibuf = &pipe->bufs[pipe->curbuf]; | |
1795 | obuf = &bufs[nbuf]; | |
1796 | ||
1797 | if (rem >= ibuf->len) { | |
1798 | *obuf = *ibuf; | |
1799 | ibuf->ops = NULL; | |
1800 | pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); | |
1801 | pipe->nrbufs--; | |
1802 | } else { | |
1803 | ibuf->ops->get(pipe, ibuf); | |
1804 | *obuf = *ibuf; | |
1805 | obuf->flags &= ~PIPE_BUF_FLAG_GIFT; | |
1806 | obuf->len = rem; | |
1807 | ibuf->offset += obuf->len; | |
1808 | ibuf->len -= obuf->len; | |
1809 | } | |
1810 | nbuf++; | |
1811 | rem -= obuf->len; | |
1812 | } | |
1813 | pipe_unlock(pipe); | |
1814 | ||
c3021629 | 1815 | fuse_copy_init(&cs, fc, 0, NULL, nbuf); |
dd3bb14f | 1816 | cs.pipebufs = bufs; |
dd3bb14f MS |
1817 | cs.pipe = pipe; |
1818 | ||
ce534fb0 MS |
1819 | if (flags & SPLICE_F_MOVE) |
1820 | cs.move_pages = 1; | |
1821 | ||
dd3bb14f MS |
1822 | ret = fuse_dev_do_write(fc, &cs, len); |
1823 | ||
1824 | for (idx = 0; idx < nbuf; idx++) { | |
1825 | struct pipe_buffer *buf = &bufs[idx]; | |
1826 | buf->ops->release(pipe, buf); | |
1827 | } | |
1828 | out: | |
1829 | kfree(bufs); | |
1830 | return ret; | |
1831 | } | |
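
Seen from the daemon, the matching call is a splice(2) from a pipe that already holds the assembled reply into the device. A minimal sketch, assuming the reply has been built in 'pipefd' by earlier vmsplice()/splice() calls (names illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_reply(int devfd, int pipefd, size_t reply_len)
{
	/*
	 * SPLICE_F_MOVE asks for the pipe pages to be moved into the page
	 * cache rather than copied (cs.move_pages above); without it the
	 * data is still spliced in but copied on the way.
	 */
	return splice(pipefd, NULL, devfd, NULL, reply_len, SPLICE_F_MOVE);
}
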
1832 | ||
334f485d MS |
1833 | static unsigned fuse_dev_poll(struct file *file, poll_table *wait) |
1834 | { | |
334f485d | 1835 | unsigned mask = POLLOUT | POLLWRNORM; |
7025d9ad | 1836 | struct fuse_conn *fc = fuse_get_conn(file); |
334f485d | 1837 | if (!fc) |
7025d9ad | 1838 | return POLLERR; |
334f485d MS |
1839 | |
1840 | poll_wait(file, &fc->waitq, wait); | |
1841 | ||
d7133114 | 1842 | spin_lock(&fc->lock); |
7025d9ad MS |
1843 | if (!fc->connected) |
1844 | mask = POLLERR; | |
a4d27e75 | 1845 | else if (request_pending(fc)) |
7025d9ad | 1846 | mask |= POLLIN | POLLRDNORM; |
d7133114 | 1847 | spin_unlock(&fc->lock); |
334f485d MS |
1848 | |
1849 | return mask; | |
1850 | } | |
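
The corresponding daemon-side idiom is a plain poll() on the device before issuing a blocking read for the next request; a sketch:

#include <poll.h>

static int wait_for_request(int devfd)
{
	struct pollfd pfd = { .fd = devfd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0 || (pfd.revents & POLLERR))
		return -1;	/* poll failure, or connection aborted */
	return 0;		/* a request is pending; read() will not block */
}
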
1851 | ||
69a53bf2 MS |
1852 | /* |
1853 | * Abort all requests on the given list (pending or processing) | |
1854 | * | |
d7133114 | 1855 | * This function releases and reacquires fc->lock |
69a53bf2 | 1856 | */ |
334f485d | 1857 | static void end_requests(struct fuse_conn *fc, struct list_head *head) |
b9ca67b2 MS |
1858 | __releases(fc->lock) |
1859 | __acquires(fc->lock) | |
334f485d MS |
1860 | { |
1861 | while (!list_empty(head)) { | |
1862 | struct fuse_req *req; | |
1863 | req = list_entry(head->next, struct fuse_req, list); | |
334f485d MS |
1864 | req->out.h.error = -ECONNABORTED; |
1865 | request_end(fc, req); | |
d7133114 | 1866 | spin_lock(&fc->lock); |
334f485d MS |
1867 | } |
1868 | } | |
1869 | ||
69a53bf2 MS |
1870 | /* |
1871 | * Abort requests under I/O | |
1872 | * | |
f9a2842e | 1873 | * The requests are marked aborted and finished, and the request | |
69a53bf2 MS |
1874 | * waiter is woken up. This will make request_wait_answer() wait |
1875 | * until the request is unlocked and then return. | |
64c6d8ed MS |
1876 | * |
1877 | * If the request is asynchronous, then the end function needs to be | |
1878 | * called after waiting for the request to be unlocked (if it was | |
1879 | * locked). | |
69a53bf2 MS |
1880 | */ |
1881 | static void end_io_requests(struct fuse_conn *fc) | |
b9ca67b2 MS |
1882 | __releases(fc->lock) |
1883 | __acquires(fc->lock) | |
69a53bf2 MS |
1884 | { |
1885 | while (!list_empty(&fc->io)) { | |
64c6d8ed MS |
1886 | struct fuse_req *req = |
1887 | list_entry(fc->io.next, struct fuse_req, list); | |
1888 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | |
1889 | ||
f9a2842e | 1890 | req->aborted = 1; |
69a53bf2 MS |
1891 | req->out.h.error = -ECONNABORTED; |
1892 | req->state = FUSE_REQ_FINISHED; | |
1893 | list_del_init(&req->list); | |
1894 | wake_up(&req->waitq); | |
64c6d8ed MS |
1895 | if (end) { |
1896 | req->end = NULL; | |
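			/*
			 * Take a reference so the request stays alive while
			 * fc->lock is dropped to wait for it to become
			 * unlocked and to run the end callback.
			 */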
64c6d8ed | 1897 | __fuse_get_request(req); |
d7133114 | 1898 | spin_unlock(&fc->lock); |
64c6d8ed MS |
1899 | wait_event(req->waitq, !req->locked); |
1900 | end(fc, req); | |
e9bb09dd | 1901 | fuse_put_request(fc, req); |
d7133114 | 1902 | spin_lock(&fc->lock); |
64c6d8ed | 1903 | } |
69a53bf2 MS |
1904 | } |
1905 | } | |
1906 | ||
595afaf9 | 1907 | static void end_queued_requests(struct fuse_conn *fc) |
b9ca67b2 MS |
1908 | __releases(fc->lock) |
1909 | __acquires(fc->lock) | |
595afaf9 MS |
1910 | { |
1911 | fc->max_background = UINT_MAX; | |
1912 | flush_bg_queue(fc); | |
1913 | end_requests(fc, &fc->pending); | |
1914 | end_requests(fc, &fc->processing); | |
07e77dca | 1915 | while (forget_pending(fc)) |
02c048b9 | 1916 | kfree(dequeue_forget(fc, 1, NULL)); |
595afaf9 MS |
1917 | } |
1918 | ||
357ccf2b BG |
1919 | static void end_polls(struct fuse_conn *fc) |
1920 | { | |
1921 | struct rb_node *p; | |
1922 | ||
1923 | p = rb_first(&fc->polled_files); | |
1924 | ||
1925 | while (p) { | |
1926 | struct fuse_file *ff; | |
1927 | ff = rb_entry(p, struct fuse_file, polled_node); | |
1928 | wake_up_interruptible_all(&ff->poll_wait); | |
1929 | ||
1930 | p = rb_next(p); | |
1931 | } | |
1932 | } | |
1933 | ||
69a53bf2 MS |
1934 | /* |
1935 | * Abort all requests. | |
1936 | * | |
1937 | * Emergency exit in case of a malicious or accidental deadlock, or | |
1938 | * just a hung filesystem. | |
1939 | * | |
1940 | * The same effect is usually achievable through killing the | |
1941 | * filesystem daemon and all users of the filesystem. The exception | |
1942 | * is the combination of an asynchronous request and the tricky | |
1943 | * deadlock (see Documentation/filesystems/fuse.txt). | |
1944 | * | |
1945 | * During the aborting, progression of requests from the pending and | |
1946 | * processing lists onto the io list, and progression of new requests | |
1947 | * onto the pending list is prevented by fc->connected being false. | |
1948 | * | |
1949 | * Progression of requests under I/O to the processing list is | |
f9a2842e MS |
1950 | * prevented by the req->aborted flag being true for these requests. |
1951 | * For this reason requests on the io list must be aborted first. | |
69a53bf2 MS |
1952 | */ |
1953 | void fuse_abort_conn(struct fuse_conn *fc) | |
1954 | { | |
d7133114 | 1955 | spin_lock(&fc->lock); |
69a53bf2 MS |
1956 | if (fc->connected) { |
1957 | fc->connected = 0; | |
51eb01e7 | 1958 | fc->blocked = 0; |
69a53bf2 | 1959 | end_io_requests(fc); |
595afaf9 | 1960 | end_queued_requests(fc); |
357ccf2b | 1961 | end_polls(fc); |
69a53bf2 | 1962 | wake_up_all(&fc->waitq); |
51eb01e7 | 1963 | wake_up_all(&fc->blocked_waitq); |
385a17bf | 1964 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); |
69a53bf2 | 1965 | } |
d7133114 | 1966 | spin_unlock(&fc->lock); |
69a53bf2 | 1967 | } |
08cbf542 | 1968 | EXPORT_SYMBOL_GPL(fuse_abort_conn); |
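
From userspace this function is normally reached through the fuse control filesystem: writing to the connection's 'abort' file (conventionally under /sys/fs/fuse/connections) triggers the abort. A sketch, with the connection number 42 purely as a placeholder:

#include <fcntl.h>
#include <unistd.h>

static void abort_fuse_conn_42(void)
{
	int fd = open("/sys/fs/fuse/connections/42/abort", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* any write aborts the connection */
		close(fd);
	}
}
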
69a53bf2 | 1969 | |
08cbf542 | 1970 | int fuse_dev_release(struct inode *inode, struct file *file) |
334f485d | 1971 | { |
0720b315 | 1972 | struct fuse_conn *fc = fuse_get_conn(file); |
334f485d | 1973 | if (fc) { |
d7133114 | 1974 | spin_lock(&fc->lock); |
1e9a4ed9 | 1975 | fc->connected = 0; |
595afaf9 MS |
1976 | fc->blocked = 0; |
1977 | end_queued_requests(fc); | |
357ccf2b | 1978 | end_polls(fc); |
595afaf9 | 1979 | wake_up_all(&fc->blocked_waitq); |
d7133114 | 1980 | spin_unlock(&fc->lock); |
bafa9654 | 1981 | fuse_conn_put(fc); |
385a17bf | 1982 | } |
f543f253 | 1983 | |
334f485d MS |
1984 | return 0; |
1985 | } | |
08cbf542 | 1986 | EXPORT_SYMBOL_GPL(fuse_dev_release); |
334f485d | 1987 | |
385a17bf JD |
1988 | static int fuse_dev_fasync(int fd, struct file *file, int on) |
1989 | { | |
1990 | struct fuse_conn *fc = fuse_get_conn(file); | |
1991 | if (!fc) | |
a87046d8 | 1992 | return -EPERM; |
385a17bf JD |
1993 | |
1994 | /* No locking - fasync_helper does its own locking */ | |
1995 | return fasync_helper(fd, file, on, &fc->fasync); | |
1996 | } | |
1997 | ||
4b6f5d20 | 1998 | const struct file_operations fuse_dev_operations = { |
334f485d MS |
1999 | .owner = THIS_MODULE, |
2000 | .llseek = no_llseek, | |
ee0b3e67 BP |
2001 | .read = do_sync_read, |
2002 | .aio_read = fuse_dev_read, | |
c3021629 | 2003 | .splice_read = fuse_dev_splice_read, |
ee0b3e67 BP |
2004 | .write = do_sync_write, |
2005 | .aio_write = fuse_dev_write, | |
dd3bb14f | 2006 | .splice_write = fuse_dev_splice_write, |
334f485d MS |
2007 | .poll = fuse_dev_poll, |
2008 | .release = fuse_dev_release, | |
385a17bf | 2009 | .fasync = fuse_dev_fasync, |
334f485d | 2010 | }; |
08cbf542 | 2011 | EXPORT_SYMBOL_GPL(fuse_dev_operations); |
334f485d MS |
2012 | |
2013 | static struct miscdevice fuse_miscdevice = { | |
2014 | .minor = FUSE_MINOR, | |
2015 | .name = "fuse", | |
2016 | .fops = &fuse_dev_operations, | |
2017 | }; | |
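
A daemon reaches this device by opening /dev/fuse and handing the descriptor to mount(2) through the "fd=" option, together with the rootmode/user_id/group_id options the fuse mount code expects. A minimal sketch (mount source name and root mode chosen for illustration):

#include <sys/mount.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int mount_fuse_fs(const char *mountpoint)
{
	char opts[128];
	int devfd = open("/dev/fuse", O_RDWR);

	if (devfd < 0)
		return -1;
	snprintf(opts, sizeof(opts),
		 "fd=%d,rootmode=40000,user_id=%u,group_id=%u",
		 devfd, (unsigned) getuid(), (unsigned) getgid());
	if (mount("myfs", mountpoint, "fuse", MS_NOSUID | MS_NODEV, opts) < 0) {
		close(devfd);
		return -1;
	}
	return devfd;	/* serve requests by reading/writing this fd */
}
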
2018 | ||
2019 | int __init fuse_dev_init(void) | |
2020 | { | |
2021 | int err = -ENOMEM; | |
2022 | fuse_req_cachep = kmem_cache_create("fuse_request", | |
2023 | sizeof(struct fuse_req), | |
20c2df83 | 2024 | 0, 0, NULL); |
334f485d MS |
2025 | if (!fuse_req_cachep) |
2026 | goto out; | |
2027 | ||
2028 | err = misc_register(&fuse_miscdevice); | |
2029 | if (err) | |
2030 | goto out_cache_clean; | |
2031 | ||
2032 | return 0; | |
2033 | ||
2034 | out_cache_clean: | |
2035 | kmem_cache_destroy(fuse_req_cachep); | |
2036 | out: | |
2037 | return err; | |
2038 | } | |
2039 | ||
2040 | void fuse_dev_cleanup(void) | |
2041 | { | |
2042 | misc_deregister(&fuse_miscdevice); | |
2043 | kmem_cache_destroy(fuse_req_cachep); | |
2044 | } |