]>
Commit | Line | Data |
---|---|---|
b6aeaded MS |
1 | /* |
2 | FUSE: Filesystem in Userspace | |
1729a16c | 3 | Copyright (C) 2001-2008 Miklos Szeredi <[email protected]> |
b6aeaded MS |
4 | |
5 | This program can be distributed under the terms of the GNU GPL. | |
6 | See the file COPYING. | |
7 | */ | |
8 | ||
9 | #include "fuse_i.h" | |
10 | ||
11 | #include <linux/pagemap.h> | |
12 | #include <linux/slab.h> | |
13 | #include <linux/kernel.h> | |
e8edc6e0 | 14 | #include <linux/sched.h> |
08cbf542 | 15 | #include <linux/module.h> |
d9d318d3 | 16 | #include <linux/compat.h> |
478e0841 | 17 | #include <linux/swap.h> |
b6aeaded | 18 | |
4b6f5d20 | 19 | static const struct file_operations fuse_direct_io_file_operations; |
45323fb7 | 20 | |
91fe96b4 MS |
21 | static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
22 | int opcode, struct fuse_open_out *outargp) | |
b6aeaded | 23 | { |
b6aeaded | 24 | struct fuse_open_in inarg; |
fd72faac MS |
25 | struct fuse_req *req; |
26 | int err; | |
27 | ||
b111c8c0 | 28 | req = fuse_get_req_nopages(fc); |
ce1d5a49 MS |
29 | if (IS_ERR(req)) |
30 | return PTR_ERR(req); | |
fd72faac MS |
31 | |
32 | memset(&inarg, 0, sizeof(inarg)); | |
6ff958ed MS |
33 | inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); |
34 | if (!fc->atomic_o_trunc) | |
35 | inarg.flags &= ~O_TRUNC; | |
91fe96b4 MS |
36 | req->in.h.opcode = opcode; |
37 | req->in.h.nodeid = nodeid; | |
fd72faac MS |
38 | req->in.numargs = 1; |
39 | req->in.args[0].size = sizeof(inarg); | |
40 | req->in.args[0].value = &inarg; | |
41 | req->out.numargs = 1; | |
42 | req->out.args[0].size = sizeof(*outargp); | |
43 | req->out.args[0].value = outargp; | |
b93f858a | 44 | fuse_request_send(fc, req); |
fd72faac MS |
45 | err = req->out.h.error; |
46 | fuse_put_request(fc, req); | |
47 | ||
48 | return err; | |
49 | } | |
50 | ||
acf99433 | 51 | struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) |
fd72faac MS |
52 | { |
53 | struct fuse_file *ff; | |
6b2db28a | 54 | |
fd72faac | 55 | ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); |
6b2db28a TH |
56 | if (unlikely(!ff)) |
57 | return NULL; | |
58 | ||
da5e4714 | 59 | ff->fc = fc; |
4250c066 | 60 | ff->reserved_req = fuse_request_alloc(0); |
6b2db28a TH |
61 | if (unlikely(!ff->reserved_req)) { |
62 | kfree(ff); | |
63 | return NULL; | |
fd72faac | 64 | } |
6b2db28a TH |
65 | |
66 | INIT_LIST_HEAD(&ff->write_entry); | |
67 | atomic_set(&ff->count, 0); | |
68 | RB_CLEAR_NODE(&ff->polled_node); | |
69 | init_waitqueue_head(&ff->poll_wait); | |
70 | ||
71 | spin_lock(&fc->lock); | |
72 | ff->kh = ++fc->khctr; | |
73 | spin_unlock(&fc->lock); | |
74 | ||
fd72faac MS |
75 | return ff; |
76 | } | |
77 | ||
78 | void fuse_file_free(struct fuse_file *ff) | |
79 | { | |
33649c91 | 80 | fuse_request_free(ff->reserved_req); |
fd72faac MS |
81 | kfree(ff); |
82 | } | |
83 | ||
c7b7143c | 84 | struct fuse_file *fuse_file_get(struct fuse_file *ff) |
c756e0a4 MS |
85 | { |
86 | atomic_inc(&ff->count); | |
87 | return ff; | |
88 | } | |
89 | ||
5a18ec17 MS |
90 | static void fuse_release_async(struct work_struct *work) |
91 | { | |
92 | struct fuse_req *req; | |
93 | struct fuse_conn *fc; | |
94 | struct path path; | |
95 | ||
96 | req = container_of(work, struct fuse_req, misc.release.work); | |
97 | path = req->misc.release.path; | |
98 | fc = get_fuse_conn(path.dentry->d_inode); | |
99 | ||
100 | fuse_put_request(fc, req); | |
101 | path_put(&path); | |
102 | } | |
103 | ||
819c4b3b MS |
104 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
105 | { | |
5a18ec17 MS |
106 | if (fc->destroy_req) { |
107 | /* | |
108 | * If this is a fuseblk mount, then it's possible that | |
109 | * releasing the path will result in releasing the | |
110 | * super block and sending the DESTROY request. If | |
111 | * the server is single threaded, this would hang. | |
112 | * For this reason do the path_put() in a separate | |
113 | * thread. | |
114 | */ | |
115 | atomic_inc(&req->count); | |
116 | INIT_WORK(&req->misc.release.work, fuse_release_async); | |
117 | schedule_work(&req->misc.release.work); | |
118 | } else { | |
119 | path_put(&req->misc.release.path); | |
120 | } | |
819c4b3b MS |
121 | } |
122 | ||
5a18ec17 | 123 | static void fuse_file_put(struct fuse_file *ff, bool sync) |
c756e0a4 MS |
124 | { |
125 | if (atomic_dec_and_test(&ff->count)) { | |
126 | struct fuse_req *req = ff->reserved_req; | |
8b0797a4 | 127 | |
5a18ec17 | 128 | if (sync) { |
8b41e671 | 129 | req->background = 0; |
5a18ec17 MS |
130 | fuse_request_send(ff->fc, req); |
131 | path_put(&req->misc.release.path); | |
132 | fuse_put_request(ff->fc, req); | |
133 | } else { | |
134 | req->end = fuse_release_end; | |
8b41e671 | 135 | req->background = 1; |
5a18ec17 MS |
136 | fuse_request_send_background(ff->fc, req); |
137 | } | |
c756e0a4 MS |
138 | kfree(ff); |
139 | } | |
140 | } | |
141 | ||
08cbf542 TH |
142 | int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
143 | bool isdir) | |
91fe96b4 MS |
144 | { |
145 | struct fuse_open_out outarg; | |
146 | struct fuse_file *ff; | |
147 | int err; | |
148 | int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; | |
149 | ||
150 | ff = fuse_file_alloc(fc); | |
151 | if (!ff) | |
152 | return -ENOMEM; | |
153 | ||
154 | err = fuse_send_open(fc, nodeid, file, opcode, &outarg); | |
155 | if (err) { | |
156 | fuse_file_free(ff); | |
157 | return err; | |
158 | } | |
159 | ||
160 | if (isdir) | |
161 | outarg.open_flags &= ~FOPEN_DIRECT_IO; | |
162 | ||
163 | ff->fh = outarg.fh; | |
164 | ff->nodeid = nodeid; | |
165 | ff->open_flags = outarg.open_flags; | |
166 | file->private_data = fuse_file_get(ff); | |
167 | ||
168 | return 0; | |
169 | } | |
08cbf542 | 170 | EXPORT_SYMBOL_GPL(fuse_do_open); |
91fe96b4 | 171 | |
c7b7143c | 172 | void fuse_finish_open(struct inode *inode, struct file *file) |
fd72faac | 173 | { |
c7b7143c | 174 | struct fuse_file *ff = file->private_data; |
a0822c55 | 175 | struct fuse_conn *fc = get_fuse_conn(inode); |
c7b7143c MS |
176 | |
177 | if (ff->open_flags & FOPEN_DIRECT_IO) | |
fd72faac | 178 | file->f_op = &fuse_direct_io_file_operations; |
c7b7143c | 179 | if (!(ff->open_flags & FOPEN_KEEP_CACHE)) |
b1009979 | 180 | invalidate_inode_pages2(inode->i_mapping); |
c7b7143c | 181 | if (ff->open_flags & FOPEN_NONSEEKABLE) |
a7c1b990 | 182 | nonseekable_open(inode, file); |
a0822c55 KS |
183 | if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { |
184 | struct fuse_inode *fi = get_fuse_inode(inode); | |
185 | ||
186 | spin_lock(&fc->lock); | |
187 | fi->attr_version = ++fc->attr_version; | |
188 | i_size_write(inode, 0); | |
189 | spin_unlock(&fc->lock); | |
190 | fuse_invalidate_attr(inode); | |
191 | } | |
fd72faac MS |
192 | } |
193 | ||
91fe96b4 | 194 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) |
fd72faac | 195 | { |
acf99433 | 196 | struct fuse_conn *fc = get_fuse_conn(inode); |
b6aeaded | 197 | int err; |
b6aeaded MS |
198 | |
199 | err = generic_file_open(inode, file); | |
200 | if (err) | |
201 | return err; | |
202 | ||
91fe96b4 | 203 | err = fuse_do_open(fc, get_node_id(inode), file, isdir); |
fd72faac | 204 | if (err) |
91fe96b4 | 205 | return err; |
b6aeaded | 206 | |
91fe96b4 MS |
207 | fuse_finish_open(inode, file); |
208 | ||
209 | return 0; | |
b6aeaded MS |
210 | } |
211 | ||
8b0797a4 | 212 | static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) |
64c6d8ed | 213 | { |
8b0797a4 | 214 | struct fuse_conn *fc = ff->fc; |
33649c91 | 215 | struct fuse_req *req = ff->reserved_req; |
b57d4264 | 216 | struct fuse_release_in *inarg = &req->misc.release.in; |
b6aeaded | 217 | |
8b0797a4 MS |
218 | spin_lock(&fc->lock); |
219 | list_del(&ff->write_entry); | |
220 | if (!RB_EMPTY_NODE(&ff->polled_node)) | |
221 | rb_erase(&ff->polled_node, &fc->polled_files); | |
222 | spin_unlock(&fc->lock); | |
223 | ||
357ccf2b | 224 | wake_up_interruptible_all(&ff->poll_wait); |
8b0797a4 | 225 | |
b6aeaded | 226 | inarg->fh = ff->fh; |
fd72faac | 227 | inarg->flags = flags; |
51eb01e7 | 228 | req->in.h.opcode = opcode; |
c7b7143c | 229 | req->in.h.nodeid = ff->nodeid; |
b6aeaded MS |
230 | req->in.numargs = 1; |
231 | req->in.args[0].size = sizeof(struct fuse_release_in); | |
232 | req->in.args[0].value = inarg; | |
fd72faac MS |
233 | } |
234 | ||
8b0797a4 | 235 | void fuse_release_common(struct file *file, int opcode) |
fd72faac | 236 | { |
6b2db28a TH |
237 | struct fuse_file *ff; |
238 | struct fuse_req *req; | |
b6aeaded | 239 | |
6b2db28a TH |
240 | ff = file->private_data; |
241 | if (unlikely(!ff)) | |
8b0797a4 | 242 | return; |
6b2db28a | 243 | |
6b2db28a | 244 | req = ff->reserved_req; |
8b0797a4 | 245 | fuse_prepare_release(ff, file->f_flags, opcode); |
6b2db28a | 246 | |
37fb3a30 MS |
247 | if (ff->flock) { |
248 | struct fuse_release_in *inarg = &req->misc.release.in; | |
249 | inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; | |
250 | inarg->lock_owner = fuse_lock_owner_id(ff->fc, | |
251 | (fl_owner_t) file); | |
252 | } | |
6b2db28a | 253 | /* Hold vfsmount and dentry until release is finished */ |
b0be46eb MS |
254 | path_get(&file->f_path); |
255 | req->misc.release.path = file->f_path; | |
6b2db28a | 256 | |
6b2db28a TH |
257 | /* |
258 | * Normally this will send the RELEASE request, however if | |
259 | * some asynchronous READ or WRITE requests are outstanding, | |
260 | * the sending will be delayed. | |
5a18ec17 MS |
261 | * |
262 | * Make the release synchronous if this is a fuseblk mount, | |
263 | * synchronous RELEASE is allowed (and desirable) in this case | |
264 | * because the server can be trusted not to screw up. | |
6b2db28a | 265 | */ |
5a18ec17 | 266 | fuse_file_put(ff, ff->fc->destroy_req != NULL); |
b6aeaded MS |
267 | } |
268 | ||
04730fef MS |
269 | static int fuse_open(struct inode *inode, struct file *file) |
270 | { | |
91fe96b4 | 271 | return fuse_open_common(inode, file, false); |
04730fef MS |
272 | } |
273 | ||
274 | static int fuse_release(struct inode *inode, struct file *file) | |
275 | { | |
8b0797a4 MS |
276 | fuse_release_common(file, FUSE_RELEASE); |
277 | ||
278 | /* return value is ignored by VFS */ | |
279 | return 0; | |
280 | } | |
281 | ||
282 | void fuse_sync_release(struct fuse_file *ff, int flags) | |
283 | { | |
284 | WARN_ON(atomic_read(&ff->count) > 1); | |
285 | fuse_prepare_release(ff, flags, FUSE_RELEASE); | |
286 | ff->reserved_req->force = 1; | |
8b41e671 | 287 | ff->reserved_req->background = 0; |
8b0797a4 MS |
288 | fuse_request_send(ff->fc, ff->reserved_req); |
289 | fuse_put_request(ff->fc, ff->reserved_req); | |
290 | kfree(ff); | |
04730fef | 291 | } |
08cbf542 | 292 | EXPORT_SYMBOL_GPL(fuse_sync_release); |
04730fef | 293 | |
71421259 | 294 | /* |
9c8ef561 MS |
295 | * Scramble the ID space with XTEA, so that the value of the files_struct |
296 | * pointer is not exposed to userspace. | |
71421259 | 297 | */ |
f3332114 | 298 | u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) |
71421259 | 299 | { |
9c8ef561 MS |
300 | u32 *k = fc->scramble_key; |
301 | u64 v = (unsigned long) id; | |
302 | u32 v0 = v; | |
303 | u32 v1 = v >> 32; | |
304 | u32 sum = 0; | |
305 | int i; | |
306 | ||
307 | for (i = 0; i < 32; i++) { | |
308 | v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]); | |
309 | sum += 0x9E3779B9; | |
310 | v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]); | |
311 | } | |
312 | ||
313 | return (u64) v0 + ((u64) v1 << 32); | |
71421259 MS |
314 | } |
315 | ||
3be5a52b MS |
316 | /* |
317 | * Check if page is under writeback | |
318 | * | |
319 | * This is currently done by walking the list of writepage requests | |
320 | * for the inode, which can be pretty inefficient. | |
321 | */ | |
322 | static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) | |
323 | { | |
324 | struct fuse_conn *fc = get_fuse_conn(inode); | |
325 | struct fuse_inode *fi = get_fuse_inode(inode); | |
326 | struct fuse_req *req; | |
327 | bool found = false; | |
328 | ||
329 | spin_lock(&fc->lock); | |
330 | list_for_each_entry(req, &fi->writepages, writepages_entry) { | |
331 | pgoff_t curr_index; | |
332 | ||
333 | BUG_ON(req->inode != inode); | |
334 | curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; | |
335 | if (curr_index == index) { | |
336 | found = true; | |
337 | break; | |
338 | } | |
339 | } | |
340 | spin_unlock(&fc->lock); | |
341 | ||
342 | return found; | |
343 | } | |
344 | ||
345 | /* | |
346 | * Wait for page writeback to be completed. | |
347 | * | |
348 | * Since fuse doesn't rely on the VM writeback tracking, this has to | |
349 | * use some other means. | |
350 | */ | |
351 | static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) | |
352 | { | |
353 | struct fuse_inode *fi = get_fuse_inode(inode); | |
354 | ||
355 | wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); | |
356 | return 0; | |
357 | } | |
358 | ||
75e1fcc0 | 359 | static int fuse_flush(struct file *file, fl_owner_t id) |
b6aeaded | 360 | { |
6131ffaa | 361 | struct inode *inode = file_inode(file); |
b6aeaded MS |
362 | struct fuse_conn *fc = get_fuse_conn(inode); |
363 | struct fuse_file *ff = file->private_data; | |
364 | struct fuse_req *req; | |
365 | struct fuse_flush_in inarg; | |
366 | int err; | |
367 | ||
248d86e8 MS |
368 | if (is_bad_inode(inode)) |
369 | return -EIO; | |
370 | ||
b6aeaded MS |
371 | if (fc->no_flush) |
372 | return 0; | |
373 | ||
b111c8c0 | 374 | req = fuse_get_req_nofail_nopages(fc, file); |
b6aeaded MS |
375 | memset(&inarg, 0, sizeof(inarg)); |
376 | inarg.fh = ff->fh; | |
9c8ef561 | 377 | inarg.lock_owner = fuse_lock_owner_id(fc, id); |
b6aeaded MS |
378 | req->in.h.opcode = FUSE_FLUSH; |
379 | req->in.h.nodeid = get_node_id(inode); | |
b6aeaded MS |
380 | req->in.numargs = 1; |
381 | req->in.args[0].size = sizeof(inarg); | |
382 | req->in.args[0].value = &inarg; | |
71421259 | 383 | req->force = 1; |
b93f858a | 384 | fuse_request_send(fc, req); |
b6aeaded MS |
385 | err = req->out.h.error; |
386 | fuse_put_request(fc, req); | |
387 | if (err == -ENOSYS) { | |
388 | fc->no_flush = 1; | |
389 | err = 0; | |
390 | } | |
391 | return err; | |
392 | } | |
393 | ||
3be5a52b MS |
394 | /* |
395 | * Wait for all pending writepages on the inode to finish. | |
396 | * | |
397 | * This is currently done by blocking further writes with FUSE_NOWRITE | |
398 | * and waiting for all sent writes to complete. | |
399 | * | |
400 | * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage | |
401 | * could conflict with truncation. | |
402 | */ | |
403 | static void fuse_sync_writes(struct inode *inode) | |
404 | { | |
405 | fuse_set_nowrite(inode); | |
406 | fuse_release_nowrite(inode); | |
407 | } | |
408 | ||
02c24a82 JB |
409 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, |
410 | int datasync, int isdir) | |
b6aeaded | 411 | { |
7ea80859 | 412 | struct inode *inode = file->f_mapping->host; |
b6aeaded MS |
413 | struct fuse_conn *fc = get_fuse_conn(inode); |
414 | struct fuse_file *ff = file->private_data; | |
415 | struct fuse_req *req; | |
416 | struct fuse_fsync_in inarg; | |
417 | int err; | |
418 | ||
248d86e8 MS |
419 | if (is_bad_inode(inode)) |
420 | return -EIO; | |
421 | ||
02c24a82 JB |
422 | err = filemap_write_and_wait_range(inode->i_mapping, start, end); |
423 | if (err) | |
424 | return err; | |
425 | ||
82547981 | 426 | if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) |
b6aeaded MS |
427 | return 0; |
428 | ||
02c24a82 JB |
429 | mutex_lock(&inode->i_mutex); |
430 | ||
3be5a52b MS |
431 | /* |
432 | * Start writeback against all dirty pages of the inode, then | |
433 | * wait for all outstanding writes, before sending the FSYNC | |
434 | * request. | |
435 | */ | |
436 | err = write_inode_now(inode, 0); | |
437 | if (err) | |
02c24a82 | 438 | goto out; |
3be5a52b MS |
439 | |
440 | fuse_sync_writes(inode); | |
441 | ||
b111c8c0 | 442 | req = fuse_get_req_nopages(fc); |
02c24a82 JB |
443 | if (IS_ERR(req)) { |
444 | err = PTR_ERR(req); | |
445 | goto out; | |
446 | } | |
b6aeaded MS |
447 | |
448 | memset(&inarg, 0, sizeof(inarg)); | |
449 | inarg.fh = ff->fh; | |
450 | inarg.fsync_flags = datasync ? 1 : 0; | |
82547981 | 451 | req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; |
b6aeaded | 452 | req->in.h.nodeid = get_node_id(inode); |
b6aeaded MS |
453 | req->in.numargs = 1; |
454 | req->in.args[0].size = sizeof(inarg); | |
455 | req->in.args[0].value = &inarg; | |
b93f858a | 456 | fuse_request_send(fc, req); |
b6aeaded MS |
457 | err = req->out.h.error; |
458 | fuse_put_request(fc, req); | |
459 | if (err == -ENOSYS) { | |
82547981 MS |
460 | if (isdir) |
461 | fc->no_fsyncdir = 1; | |
462 | else | |
463 | fc->no_fsync = 1; | |
b6aeaded MS |
464 | err = 0; |
465 | } | |
02c24a82 JB |
466 | out: |
467 | mutex_unlock(&inode->i_mutex); | |
b6aeaded MS |
468 | return err; |
469 | } | |
470 | ||
02c24a82 JB |
471 | static int fuse_fsync(struct file *file, loff_t start, loff_t end, |
472 | int datasync) | |
82547981 | 473 | { |
02c24a82 | 474 | return fuse_fsync_common(file, start, end, datasync, 0); |
82547981 MS |
475 | } |
476 | ||
2106cb18 MS |
477 | void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, |
478 | size_t count, int opcode) | |
b6aeaded | 479 | { |
5c5c5e51 | 480 | struct fuse_read_in *inarg = &req->misc.read.in; |
a6643094 | 481 | struct fuse_file *ff = file->private_data; |
b6aeaded | 482 | |
361b1eb5 MS |
483 | inarg->fh = ff->fh; |
484 | inarg->offset = pos; | |
485 | inarg->size = count; | |
a6643094 | 486 | inarg->flags = file->f_flags; |
361b1eb5 | 487 | req->in.h.opcode = opcode; |
2106cb18 | 488 | req->in.h.nodeid = ff->nodeid; |
b6aeaded MS |
489 | req->in.numargs = 1; |
490 | req->in.args[0].size = sizeof(struct fuse_read_in); | |
c1aa96a5 | 491 | req->in.args[0].value = inarg; |
b6aeaded MS |
492 | req->out.argvar = 1; |
493 | req->out.numargs = 1; | |
494 | req->out.args[0].size = count; | |
b6aeaded MS |
495 | } |
496 | ||
8bfc016d | 497 | static size_t fuse_send_read(struct fuse_req *req, struct file *file, |
2106cb18 | 498 | loff_t pos, size_t count, fl_owner_t owner) |
04730fef | 499 | { |
2106cb18 MS |
500 | struct fuse_file *ff = file->private_data; |
501 | struct fuse_conn *fc = ff->fc; | |
f3332114 | 502 | |
2106cb18 | 503 | fuse_read_fill(req, file, pos, count, FUSE_READ); |
f3332114 | 504 | if (owner != NULL) { |
5c5c5e51 | 505 | struct fuse_read_in *inarg = &req->misc.read.in; |
f3332114 MS |
506 | |
507 | inarg->read_flags |= FUSE_READ_LOCKOWNER; | |
508 | inarg->lock_owner = fuse_lock_owner_id(fc, owner); | |
509 | } | |
b93f858a | 510 | fuse_request_send(fc, req); |
361b1eb5 | 511 | return req->out.args[0].size; |
04730fef MS |
512 | } |
513 | ||
5c5c5e51 MS |
514 | static void fuse_read_update_size(struct inode *inode, loff_t size, |
515 | u64 attr_ver) | |
516 | { | |
517 | struct fuse_conn *fc = get_fuse_conn(inode); | |
518 | struct fuse_inode *fi = get_fuse_inode(inode); | |
519 | ||
520 | spin_lock(&fc->lock); | |
521 | if (attr_ver == fi->attr_version && size < inode->i_size) { | |
522 | fi->attr_version = ++fc->attr_version; | |
523 | i_size_write(inode, size); | |
524 | } | |
525 | spin_unlock(&fc->lock); | |
526 | } | |
527 | ||
b6aeaded MS |
528 | static int fuse_readpage(struct file *file, struct page *page) |
529 | { | |
530 | struct inode *inode = page->mapping->host; | |
531 | struct fuse_conn *fc = get_fuse_conn(inode); | |
248d86e8 | 532 | struct fuse_req *req; |
5c5c5e51 MS |
533 | size_t num_read; |
534 | loff_t pos = page_offset(page); | |
535 | size_t count = PAGE_CACHE_SIZE; | |
536 | u64 attr_ver; | |
248d86e8 MS |
537 | int err; |
538 | ||
539 | err = -EIO; | |
540 | if (is_bad_inode(inode)) | |
541 | goto out; | |
542 | ||
3be5a52b | 543 | /* |
25985edc | 544 | * Page writeback can extend beyond the lifetime of the |
3be5a52b MS |
545 | * page-cache page, so make sure we read a properly synced |
546 | * page. | |
547 | */ | |
548 | fuse_wait_on_page_writeback(inode, page->index); | |
549 | ||
b111c8c0 | 550 | req = fuse_get_req(fc, 1); |
ce1d5a49 MS |
551 | err = PTR_ERR(req); |
552 | if (IS_ERR(req)) | |
b6aeaded MS |
553 | goto out; |
554 | ||
5c5c5e51 MS |
555 | attr_ver = fuse_get_attr_version(fc); |
556 | ||
b6aeaded | 557 | req->out.page_zeroing = 1; |
f4975c67 | 558 | req->out.argpages = 1; |
b6aeaded MS |
559 | req->num_pages = 1; |
560 | req->pages[0] = page; | |
85f40aec | 561 | req->page_descs[0].length = count; |
2106cb18 | 562 | num_read = fuse_send_read(req, file, pos, count, NULL); |
b6aeaded MS |
563 | err = req->out.h.error; |
564 | fuse_put_request(fc, req); | |
5c5c5e51 MS |
565 | |
566 | if (!err) { | |
567 | /* | |
568 | * Short read means EOF. If file size is larger, truncate it | |
569 | */ | |
570 | if (num_read < count) | |
571 | fuse_read_update_size(inode, pos + num_read, attr_ver); | |
572 | ||
b6aeaded | 573 | SetPageUptodate(page); |
5c5c5e51 MS |
574 | } |
575 | ||
b36c31ba | 576 | fuse_invalidate_attr(inode); /* atime changed */ |
b6aeaded MS |
577 | out: |
578 | unlock_page(page); | |
579 | return err; | |
580 | } | |
581 | ||
c1aa96a5 | 582 | static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) |
db50b96c | 583 | { |
c1aa96a5 | 584 | int i; |
5c5c5e51 MS |
585 | size_t count = req->misc.read.in.size; |
586 | size_t num_read = req->out.args[0].size; | |
ce534fb0 | 587 | struct address_space *mapping = NULL; |
c1aa96a5 | 588 | |
ce534fb0 MS |
589 | for (i = 0; mapping == NULL && i < req->num_pages; i++) |
590 | mapping = req->pages[i]->mapping; | |
5c5c5e51 | 591 | |
ce534fb0 MS |
592 | if (mapping) { |
593 | struct inode *inode = mapping->host; | |
594 | ||
595 | /* | |
596 | * Short read means EOF. If file size is larger, truncate it | |
597 | */ | |
598 | if (!req->out.h.error && num_read < count) { | |
599 | loff_t pos; | |
600 | ||
601 | pos = page_offset(req->pages[0]) + num_read; | |
602 | fuse_read_update_size(inode, pos, | |
603 | req->misc.read.attr_ver); | |
604 | } | |
605 | fuse_invalidate_attr(inode); /* atime changed */ | |
606 | } | |
c1aa96a5 | 607 | |
db50b96c MS |
608 | for (i = 0; i < req->num_pages; i++) { |
609 | struct page *page = req->pages[i]; | |
610 | if (!req->out.h.error) | |
611 | SetPageUptodate(page); | |
c1aa96a5 MS |
612 | else |
613 | SetPageError(page); | |
db50b96c | 614 | unlock_page(page); |
b5dd3285 | 615 | page_cache_release(page); |
db50b96c | 616 | } |
c756e0a4 | 617 | if (req->ff) |
5a18ec17 | 618 | fuse_file_put(req->ff, false); |
c1aa96a5 MS |
619 | } |
620 | ||
2106cb18 | 621 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
c1aa96a5 | 622 | { |
2106cb18 MS |
623 | struct fuse_file *ff = file->private_data; |
624 | struct fuse_conn *fc = ff->fc; | |
c1aa96a5 MS |
625 | loff_t pos = page_offset(req->pages[0]); |
626 | size_t count = req->num_pages << PAGE_CACHE_SHIFT; | |
f4975c67 MS |
627 | |
628 | req->out.argpages = 1; | |
c1aa96a5 | 629 | req->out.page_zeroing = 1; |
ce534fb0 | 630 | req->out.page_replace = 1; |
2106cb18 | 631 | fuse_read_fill(req, file, pos, count, FUSE_READ); |
5c5c5e51 | 632 | req->misc.read.attr_ver = fuse_get_attr_version(fc); |
9cd68455 | 633 | if (fc->async_read) { |
c756e0a4 | 634 | req->ff = fuse_file_get(ff); |
9cd68455 | 635 | req->end = fuse_readpages_end; |
b93f858a | 636 | fuse_request_send_background(fc, req); |
9cd68455 | 637 | } else { |
b93f858a | 638 | fuse_request_send(fc, req); |
9cd68455 | 639 | fuse_readpages_end(fc, req); |
e9bb09dd | 640 | fuse_put_request(fc, req); |
9cd68455 | 641 | } |
db50b96c MS |
642 | } |
643 | ||
c756e0a4 | 644 | struct fuse_fill_data { |
db50b96c | 645 | struct fuse_req *req; |
a6643094 | 646 | struct file *file; |
db50b96c | 647 | struct inode *inode; |
f8dbdf81 | 648 | unsigned nr_pages; |
db50b96c MS |
649 | }; |
650 | ||
651 | static int fuse_readpages_fill(void *_data, struct page *page) | |
652 | { | |
c756e0a4 | 653 | struct fuse_fill_data *data = _data; |
db50b96c MS |
654 | struct fuse_req *req = data->req; |
655 | struct inode *inode = data->inode; | |
656 | struct fuse_conn *fc = get_fuse_conn(inode); | |
657 | ||
3be5a52b MS |
658 | fuse_wait_on_page_writeback(inode, page->index); |
659 | ||
db50b96c MS |
660 | if (req->num_pages && |
661 | (req->num_pages == FUSE_MAX_PAGES_PER_REQ || | |
662 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || | |
663 | req->pages[req->num_pages - 1]->index + 1 != page->index)) { | |
f8dbdf81 MP |
664 | int nr_alloc = min_t(unsigned, data->nr_pages, |
665 | FUSE_MAX_PAGES_PER_REQ); | |
2106cb18 | 666 | fuse_send_readpages(req, data->file); |
8b41e671 MP |
667 | if (fc->async_read) |
668 | req = fuse_get_req_for_background(fc, nr_alloc); | |
669 | else | |
670 | req = fuse_get_req(fc, nr_alloc); | |
671 | ||
672 | data->req = req; | |
ce1d5a49 | 673 | if (IS_ERR(req)) { |
db50b96c | 674 | unlock_page(page); |
ce1d5a49 | 675 | return PTR_ERR(req); |
db50b96c | 676 | } |
db50b96c | 677 | } |
f8dbdf81 MP |
678 | |
679 | if (WARN_ON(req->num_pages >= req->max_pages)) { | |
680 | fuse_put_request(fc, req); | |
681 | return -EIO; | |
682 | } | |
683 | ||
b5dd3285 | 684 | page_cache_get(page); |
db50b96c | 685 | req->pages[req->num_pages] = page; |
85f40aec | 686 | req->page_descs[req->num_pages].length = PAGE_SIZE; |
1729a16c | 687 | req->num_pages++; |
f8dbdf81 | 688 | data->nr_pages--; |
db50b96c MS |
689 | return 0; |
690 | } | |
691 | ||
692 | static int fuse_readpages(struct file *file, struct address_space *mapping, | |
693 | struct list_head *pages, unsigned nr_pages) | |
694 | { | |
695 | struct inode *inode = mapping->host; | |
696 | struct fuse_conn *fc = get_fuse_conn(inode); | |
c756e0a4 | 697 | struct fuse_fill_data data; |
db50b96c | 698 | int err; |
f8dbdf81 | 699 | int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ); |
248d86e8 | 700 | |
1d7ea732 | 701 | err = -EIO; |
248d86e8 | 702 | if (is_bad_inode(inode)) |
2e990021 | 703 | goto out; |
248d86e8 | 704 | |
a6643094 | 705 | data.file = file; |
db50b96c | 706 | data.inode = inode; |
8b41e671 MP |
707 | if (fc->async_read) |
708 | data.req = fuse_get_req_for_background(fc, nr_alloc); | |
709 | else | |
710 | data.req = fuse_get_req(fc, nr_alloc); | |
f8dbdf81 | 711 | data.nr_pages = nr_pages; |
1d7ea732 | 712 | err = PTR_ERR(data.req); |
ce1d5a49 | 713 | if (IS_ERR(data.req)) |
2e990021 | 714 | goto out; |
db50b96c MS |
715 | |
716 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); | |
d3406ffa MS |
717 | if (!err) { |
718 | if (data.req->num_pages) | |
2106cb18 | 719 | fuse_send_readpages(data.req, file); |
d3406ffa MS |
720 | else |
721 | fuse_put_request(fc, data.req); | |
722 | } | |
2e990021 | 723 | out: |
1d7ea732 | 724 | return err; |
db50b96c MS |
725 | } |
726 | ||
bcb4be80 MS |
727 | static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, |
728 | unsigned long nr_segs, loff_t pos) | |
729 | { | |
730 | struct inode *inode = iocb->ki_filp->f_mapping->host; | |
a8894274 | 731 | struct fuse_conn *fc = get_fuse_conn(inode); |
bcb4be80 | 732 | |
a8894274 BF |
733 | /* |
734 | * In auto invalidate mode, always update attributes on read. | |
735 | * Otherwise, only update if we attempt to read past EOF (to ensure | |
736 | * i_size is up to date). | |
737 | */ | |
738 | if (fc->auto_inval_data || | |
739 | (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { | |
bcb4be80 | 740 | int err; |
bcb4be80 MS |
741 | err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); |
742 | if (err) | |
743 | return err; | |
744 | } | |
745 | ||
746 | return generic_file_aio_read(iocb, iov, nr_segs, pos); | |
747 | } | |
748 | ||
2d698b07 | 749 | static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, |
2106cb18 | 750 | loff_t pos, size_t count) |
b6aeaded | 751 | { |
b25e82e5 MS |
752 | struct fuse_write_in *inarg = &req->misc.write.in; |
753 | struct fuse_write_out *outarg = &req->misc.write.out; | |
b6aeaded | 754 | |
b25e82e5 MS |
755 | inarg->fh = ff->fh; |
756 | inarg->offset = pos; | |
757 | inarg->size = count; | |
b6aeaded | 758 | req->in.h.opcode = FUSE_WRITE; |
2106cb18 | 759 | req->in.h.nodeid = ff->nodeid; |
b6aeaded | 760 | req->in.numargs = 2; |
2106cb18 | 761 | if (ff->fc->minor < 9) |
f3332114 MS |
762 | req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; |
763 | else | |
764 | req->in.args[0].size = sizeof(struct fuse_write_in); | |
b25e82e5 | 765 | req->in.args[0].value = inarg; |
b6aeaded MS |
766 | req->in.args[1].size = count; |
767 | req->out.numargs = 1; | |
768 | req->out.args[0].size = sizeof(struct fuse_write_out); | |
b25e82e5 MS |
769 | req->out.args[0].value = outarg; |
770 | } | |
771 | ||
772 | static size_t fuse_send_write(struct fuse_req *req, struct file *file, | |
2106cb18 | 773 | loff_t pos, size_t count, fl_owner_t owner) |
b25e82e5 | 774 | { |
2106cb18 MS |
775 | struct fuse_file *ff = file->private_data; |
776 | struct fuse_conn *fc = ff->fc; | |
2d698b07 MS |
777 | struct fuse_write_in *inarg = &req->misc.write.in; |
778 | ||
2106cb18 | 779 | fuse_write_fill(req, ff, pos, count); |
2d698b07 | 780 | inarg->flags = file->f_flags; |
f3332114 | 781 | if (owner != NULL) { |
f3332114 MS |
782 | inarg->write_flags |= FUSE_WRITE_LOCKOWNER; |
783 | inarg->lock_owner = fuse_lock_owner_id(fc, owner); | |
784 | } | |
b93f858a | 785 | fuse_request_send(fc, req); |
b25e82e5 | 786 | return req->misc.write.out.size; |
b6aeaded MS |
787 | } |
788 | ||
a1d75f25 | 789 | void fuse_write_update_size(struct inode *inode, loff_t pos) |
854512ec MS |
790 | { |
791 | struct fuse_conn *fc = get_fuse_conn(inode); | |
792 | struct fuse_inode *fi = get_fuse_inode(inode); | |
793 | ||
794 | spin_lock(&fc->lock); | |
795 | fi->attr_version = ++fc->attr_version; | |
796 | if (pos > inode->i_size) | |
797 | i_size_write(inode, pos); | |
798 | spin_unlock(&fc->lock); | |
799 | } | |
800 | ||
ea9b9907 NP |
801 | static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, |
802 | struct inode *inode, loff_t pos, | |
803 | size_t count) | |
804 | { | |
805 | size_t res; | |
806 | unsigned offset; | |
807 | unsigned i; | |
808 | ||
809 | for (i = 0; i < req->num_pages; i++) | |
810 | fuse_wait_on_page_writeback(inode, req->pages[i]->index); | |
811 | ||
2106cb18 | 812 | res = fuse_send_write(req, file, pos, count, NULL); |
ea9b9907 | 813 | |
b2430d75 | 814 | offset = req->page_descs[0].offset; |
ea9b9907 NP |
815 | count = res; |
816 | for (i = 0; i < req->num_pages; i++) { | |
817 | struct page *page = req->pages[i]; | |
818 | ||
819 | if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) | |
820 | SetPageUptodate(page); | |
821 | ||
822 | if (count > PAGE_CACHE_SIZE - offset) | |
823 | count -= PAGE_CACHE_SIZE - offset; | |
824 | else | |
825 | count = 0; | |
826 | offset = 0; | |
827 | ||
828 | unlock_page(page); | |
829 | page_cache_release(page); | |
830 | } | |
831 | ||
832 | return res; | |
833 | } | |
834 | ||
835 | static ssize_t fuse_fill_write_pages(struct fuse_req *req, | |
836 | struct address_space *mapping, | |
837 | struct iov_iter *ii, loff_t pos) | |
838 | { | |
839 | struct fuse_conn *fc = get_fuse_conn(mapping->host); | |
840 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); | |
841 | size_t count = 0; | |
842 | int err; | |
843 | ||
f4975c67 | 844 | req->in.argpages = 1; |
b2430d75 | 845 | req->page_descs[0].offset = offset; |
ea9b9907 NP |
846 | |
847 | do { | |
848 | size_t tmp; | |
849 | struct page *page; | |
850 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | |
851 | size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, | |
852 | iov_iter_count(ii)); | |
853 | ||
854 | bytes = min_t(size_t, bytes, fc->max_write - count); | |
855 | ||
856 | again: | |
857 | err = -EFAULT; | |
858 | if (iov_iter_fault_in_readable(ii, bytes)) | |
859 | break; | |
860 | ||
861 | err = -ENOMEM; | |
54566b2c | 862 | page = grab_cache_page_write_begin(mapping, index, 0); |
ea9b9907 NP |
863 | if (!page) |
864 | break; | |
865 | ||
931e80e4 | 866 | if (mapping_writably_mapped(mapping)) |
867 | flush_dcache_page(page); | |
868 | ||
ea9b9907 NP |
869 | pagefault_disable(); |
870 | tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); | |
871 | pagefault_enable(); | |
872 | flush_dcache_page(page); | |
873 | ||
478e0841 JW |
874 | mark_page_accessed(page); |
875 | ||
ea9b9907 NP |
876 | if (!tmp) { |
877 | unlock_page(page); | |
878 | page_cache_release(page); | |
879 | bytes = min(bytes, iov_iter_single_seg_count(ii)); | |
880 | goto again; | |
881 | } | |
882 | ||
883 | err = 0; | |
884 | req->pages[req->num_pages] = page; | |
85f40aec | 885 | req->page_descs[req->num_pages].length = tmp; |
ea9b9907 NP |
886 | req->num_pages++; |
887 | ||
888 | iov_iter_advance(ii, tmp); | |
889 | count += tmp; | |
890 | pos += tmp; | |
891 | offset += tmp; | |
892 | if (offset == PAGE_CACHE_SIZE) | |
893 | offset = 0; | |
894 | ||
78bb6cb9 MS |
895 | if (!fc->big_writes) |
896 | break; | |
ea9b9907 | 897 | } while (iov_iter_count(ii) && count < fc->max_write && |
d07f09f5 | 898 | req->num_pages < req->max_pages && offset == 0); |
ea9b9907 NP |
899 | |
900 | return count > 0 ? count : err; | |
901 | } | |
902 | ||
d07f09f5 MP |
903 | static inline unsigned fuse_wr_pages(loff_t pos, size_t len) |
904 | { | |
905 | return min_t(unsigned, | |
906 | ((pos + len - 1) >> PAGE_CACHE_SHIFT) - | |
907 | (pos >> PAGE_CACHE_SHIFT) + 1, | |
908 | FUSE_MAX_PAGES_PER_REQ); | |
909 | } | |
910 | ||
ea9b9907 NP |
911 | static ssize_t fuse_perform_write(struct file *file, |
912 | struct address_space *mapping, | |
913 | struct iov_iter *ii, loff_t pos) | |
914 | { | |
915 | struct inode *inode = mapping->host; | |
916 | struct fuse_conn *fc = get_fuse_conn(inode); | |
917 | int err = 0; | |
918 | ssize_t res = 0; | |
919 | ||
920 | if (is_bad_inode(inode)) | |
921 | return -EIO; | |
922 | ||
923 | do { | |
924 | struct fuse_req *req; | |
925 | ssize_t count; | |
d07f09f5 | 926 | unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii)); |
ea9b9907 | 927 | |
d07f09f5 | 928 | req = fuse_get_req(fc, nr_pages); |
ea9b9907 NP |
929 | if (IS_ERR(req)) { |
930 | err = PTR_ERR(req); | |
931 | break; | |
932 | } | |
933 | ||
934 | count = fuse_fill_write_pages(req, mapping, ii, pos); | |
935 | if (count <= 0) { | |
936 | err = count; | |
937 | } else { | |
938 | size_t num_written; | |
939 | ||
940 | num_written = fuse_send_write_pages(req, file, inode, | |
941 | pos, count); | |
942 | err = req->out.h.error; | |
943 | if (!err) { | |
944 | res += num_written; | |
945 | pos += num_written; | |
946 | ||
947 | /* break out of the loop on short write */ | |
948 | if (num_written != count) | |
949 | err = -EIO; | |
950 | } | |
951 | } | |
952 | fuse_put_request(fc, req); | |
953 | } while (!err && iov_iter_count(ii)); | |
954 | ||
955 | if (res > 0) | |
956 | fuse_write_update_size(inode, pos); | |
957 | ||
958 | fuse_invalidate_attr(inode); | |
959 | ||
960 | return res > 0 ? res : err; | |
961 | } | |
962 | ||
963 | static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |
964 | unsigned long nr_segs, loff_t pos) | |
965 | { | |
966 | struct file *file = iocb->ki_filp; | |
967 | struct address_space *mapping = file->f_mapping; | |
968 | size_t count = 0; | |
4273b793 | 969 | size_t ocount = 0; |
ea9b9907 | 970 | ssize_t written = 0; |
4273b793 | 971 | ssize_t written_buffered = 0; |
ea9b9907 NP |
972 | struct inode *inode = mapping->host; |
973 | ssize_t err; | |
974 | struct iov_iter i; | |
4273b793 | 975 | loff_t endbyte = 0; |
ea9b9907 NP |
976 | |
977 | WARN_ON(iocb->ki_pos != pos); | |
978 | ||
4273b793 AA |
979 | ocount = 0; |
980 | err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); | |
ea9b9907 NP |
981 | if (err) |
982 | return err; | |
983 | ||
4273b793 | 984 | count = ocount; |
58ef6a75 | 985 | sb_start_write(inode->i_sb); |
ea9b9907 | 986 | mutex_lock(&inode->i_mutex); |
ea9b9907 NP |
987 | |
988 | /* We can write back this queue in page reclaim */ | |
989 | current->backing_dev_info = mapping->backing_dev_info; | |
990 | ||
991 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | |
992 | if (err) | |
993 | goto out; | |
994 | ||
995 | if (count == 0) | |
996 | goto out; | |
997 | ||
2f1936b8 | 998 | err = file_remove_suid(file); |
ea9b9907 NP |
999 | if (err) |
1000 | goto out; | |
1001 | ||
c3b2da31 JB |
1002 | err = file_update_time(file); |
1003 | if (err) | |
1004 | goto out; | |
ea9b9907 | 1005 | |
4273b793 AA |
1006 | if (file->f_flags & O_DIRECT) { |
1007 | written = generic_file_direct_write(iocb, iov, &nr_segs, | |
1008 | pos, &iocb->ki_pos, | |
1009 | count, ocount); | |
1010 | if (written < 0 || written == count) | |
1011 | goto out; | |
1012 | ||
1013 | pos += written; | |
1014 | count -= written; | |
ea9b9907 | 1015 | |
4273b793 AA |
1016 | iov_iter_init(&i, iov, nr_segs, count, written); |
1017 | written_buffered = fuse_perform_write(file, mapping, &i, pos); | |
1018 | if (written_buffered < 0) { | |
1019 | err = written_buffered; | |
1020 | goto out; | |
1021 | } | |
1022 | endbyte = pos + written_buffered - 1; | |
1023 | ||
1024 | err = filemap_write_and_wait_range(file->f_mapping, pos, | |
1025 | endbyte); | |
1026 | if (err) | |
1027 | goto out; | |
1028 | ||
1029 | invalidate_mapping_pages(file->f_mapping, | |
1030 | pos >> PAGE_CACHE_SHIFT, | |
1031 | endbyte >> PAGE_CACHE_SHIFT); | |
1032 | ||
1033 | written += written_buffered; | |
1034 | iocb->ki_pos = pos + written_buffered; | |
1035 | } else { | |
1036 | iov_iter_init(&i, iov, nr_segs, count, 0); | |
1037 | written = fuse_perform_write(file, mapping, &i, pos); | |
1038 | if (written >= 0) | |
1039 | iocb->ki_pos = pos + written; | |
1040 | } | |
ea9b9907 NP |
1041 | out: |
1042 | current->backing_dev_info = NULL; | |
1043 | mutex_unlock(&inode->i_mutex); | |
58ef6a75 | 1044 | sb_end_write(inode->i_sb); |
ea9b9907 NP |
1045 | |
1046 | return written ? written : err; | |
1047 | } | |
1048 | ||
413ef8cb MS |
1049 | static void fuse_release_user_pages(struct fuse_req *req, int write) |
1050 | { | |
1051 | unsigned i; | |
1052 | ||
1053 | for (i = 0; i < req->num_pages; i++) { | |
1054 | struct page *page = req->pages[i]; | |
1055 | if (write) | |
1056 | set_page_dirty_lock(page); | |
1057 | put_page(page); | |
1058 | } | |
1059 | } | |
1060 | ||
7c190c8b MP |
1061 | static inline void fuse_page_descs_length_init(struct fuse_req *req, |
1062 | unsigned index, unsigned nr_pages) | |
85f40aec MP |
1063 | { |
1064 | int i; | |
1065 | ||
7c190c8b | 1066 | for (i = index; i < index + nr_pages; i++) |
85f40aec MP |
1067 | req->page_descs[i].length = PAGE_SIZE - |
1068 | req->page_descs[i].offset; | |
1069 | } | |
1070 | ||
7c190c8b MP |
1071 | static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) |
1072 | { | |
1073 | return (unsigned long)ii->iov->iov_base + ii->iov_offset; | |
1074 | } | |
1075 | ||
1076 | static inline size_t fuse_get_frag_size(const struct iov_iter *ii, | |
1077 | size_t max_size) | |
1078 | { | |
1079 | return min(iov_iter_single_seg_count(ii), max_size); | |
1080 | } | |
1081 | ||
b98d023a | 1082 | static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, |
ce60a2f1 | 1083 | size_t *nbytesp, int write) |
413ef8cb | 1084 | { |
7c190c8b | 1085 | size_t nbytes = 0; /* # bytes already packed in req */ |
b98d023a | 1086 | |
f4975c67 MS |
1087 | /* Special case for kernel I/O: can copy directly into the buffer */ |
1088 | if (segment_eq(get_fs(), KERNEL_DS)) { | |
7c190c8b MP |
1089 | unsigned long user_addr = fuse_get_user_addr(ii); |
1090 | size_t frag_size = fuse_get_frag_size(ii, *nbytesp); | |
1091 | ||
f4975c67 MS |
1092 | if (write) |
1093 | req->in.args[1].value = (void *) user_addr; | |
1094 | else | |
1095 | req->out.args[0].value = (void *) user_addr; | |
1096 | ||
b98d023a MP |
1097 | iov_iter_advance(ii, frag_size); |
1098 | *nbytesp = frag_size; | |
f4975c67 MS |
1099 | return 0; |
1100 | } | |
413ef8cb | 1101 | |
5565a9d8 | 1102 | while (nbytes < *nbytesp && req->num_pages < req->max_pages) { |
7c190c8b MP |
1103 | unsigned npages; |
1104 | unsigned long user_addr = fuse_get_user_addr(ii); | |
1105 | unsigned offset = user_addr & ~PAGE_MASK; | |
1106 | size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes); | |
1107 | int ret; | |
413ef8cb | 1108 | |
5565a9d8 | 1109 | unsigned n = req->max_pages - req->num_pages; |
7c190c8b MP |
1110 | frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT); |
1111 | ||
1112 | npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1113 | npages = clamp(npages, 1U, n); | |
1114 | ||
1115 | ret = get_user_pages_fast(user_addr, npages, !write, | |
1116 | &req->pages[req->num_pages]); | |
1117 | if (ret < 0) | |
1118 | return ret; | |
1119 | ||
1120 | npages = ret; | |
1121 | frag_size = min_t(size_t, frag_size, | |
1122 | (npages << PAGE_SHIFT) - offset); | |
1123 | iov_iter_advance(ii, frag_size); | |
1124 | ||
1125 | req->page_descs[req->num_pages].offset = offset; | |
1126 | fuse_page_descs_length_init(req, req->num_pages, npages); | |
1127 | ||
1128 | req->num_pages += npages; | |
1129 | req->page_descs[req->num_pages - 1].length -= | |
1130 | (npages << PAGE_SHIFT) - offset - frag_size; | |
1131 | ||
1132 | nbytes += frag_size; | |
1133 | } | |
f4975c67 MS |
1134 | |
1135 | if (write) | |
1136 | req->in.argpages = 1; | |
1137 | else | |
1138 | req->out.argpages = 1; | |
1139 | ||
7c190c8b | 1140 | *nbytesp = nbytes; |
f4975c67 | 1141 | |
413ef8cb MS |
1142 | return 0; |
1143 | } | |
1144 | ||
5565a9d8 MP |
1145 | static inline int fuse_iter_npages(const struct iov_iter *ii_p) |
1146 | { | |
1147 | struct iov_iter ii = *ii_p; | |
1148 | int npages = 0; | |
1149 | ||
1150 | while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) { | |
1151 | unsigned long user_addr = fuse_get_user_addr(&ii); | |
1152 | unsigned offset = user_addr & ~PAGE_MASK; | |
1153 | size_t frag_size = iov_iter_single_seg_count(&ii); | |
1154 | ||
1155 | npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1156 | iov_iter_advance(&ii, frag_size); | |
1157 | } | |
1158 | ||
1159 | return min(npages, FUSE_MAX_PAGES_PER_REQ); | |
1160 | } | |
1161 | ||
fb05f41f MS |
1162 | ssize_t fuse_direct_io(struct file *file, const struct iovec *iov, |
1163 | unsigned long nr_segs, size_t count, loff_t *ppos, | |
1164 | int write) | |
413ef8cb | 1165 | { |
2106cb18 MS |
1166 | struct fuse_file *ff = file->private_data; |
1167 | struct fuse_conn *fc = ff->fc; | |
413ef8cb MS |
1168 | size_t nmax = write ? fc->max_write : fc->max_read; |
1169 | loff_t pos = *ppos; | |
1170 | ssize_t res = 0; | |
248d86e8 | 1171 | struct fuse_req *req; |
b98d023a MP |
1172 | struct iov_iter ii; |
1173 | ||
1174 | iov_iter_init(&ii, iov, nr_segs, count, 0); | |
248d86e8 | 1175 | |
5565a9d8 | 1176 | req = fuse_get_req(fc, fuse_iter_npages(&ii)); |
ce1d5a49 MS |
1177 | if (IS_ERR(req)) |
1178 | return PTR_ERR(req); | |
413ef8cb MS |
1179 | |
1180 | while (count) { | |
413ef8cb | 1181 | size_t nres; |
2106cb18 | 1182 | fl_owner_t owner = current->files; |
f4975c67 | 1183 | size_t nbytes = min(count, nmax); |
b98d023a | 1184 | int err = fuse_get_user_pages(req, &ii, &nbytes, write); |
413ef8cb MS |
1185 | if (err) { |
1186 | res = err; | |
1187 | break; | |
1188 | } | |
f4975c67 | 1189 | |
413ef8cb | 1190 | if (write) |
2106cb18 | 1191 | nres = fuse_send_write(req, file, pos, nbytes, owner); |
413ef8cb | 1192 | else |
2106cb18 MS |
1193 | nres = fuse_send_read(req, file, pos, nbytes, owner); |
1194 | ||
413ef8cb MS |
1195 | fuse_release_user_pages(req, !write); |
1196 | if (req->out.h.error) { | |
1197 | if (!res) | |
1198 | res = req->out.h.error; | |
1199 | break; | |
1200 | } else if (nres > nbytes) { | |
1201 | res = -EIO; | |
1202 | break; | |
1203 | } | |
1204 | count -= nres; | |
1205 | res += nres; | |
1206 | pos += nres; | |
413ef8cb MS |
1207 | if (nres != nbytes) |
1208 | break; | |
56cf34ff MS |
1209 | if (count) { |
1210 | fuse_put_request(fc, req); | |
5565a9d8 | 1211 | req = fuse_get_req(fc, fuse_iter_npages(&ii)); |
56cf34ff MS |
1212 | if (IS_ERR(req)) |
1213 | break; | |
1214 | } | |
413ef8cb | 1215 | } |
f60311d5 AA |
1216 | if (!IS_ERR(req)) |
1217 | fuse_put_request(fc, req); | |
d09cb9d7 | 1218 | if (res > 0) |
413ef8cb | 1219 | *ppos = pos; |
413ef8cb MS |
1220 | |
1221 | return res; | |
1222 | } | |
08cbf542 | 1223 | EXPORT_SYMBOL_GPL(fuse_direct_io); |
413ef8cb | 1224 | |
b98d023a MP |
1225 | static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov, |
1226 | unsigned long nr_segs, loff_t *ppos) | |
413ef8cb | 1227 | { |
d09cb9d7 | 1228 | ssize_t res; |
6131ffaa | 1229 | struct inode *inode = file_inode(file); |
d09cb9d7 MS |
1230 | |
1231 | if (is_bad_inode(inode)) | |
1232 | return -EIO; | |
1233 | ||
fb05f41f MS |
1234 | res = fuse_direct_io(file, iov, nr_segs, iov_length(iov, nr_segs), |
1235 | ppos, 0); | |
d09cb9d7 MS |
1236 | |
1237 | fuse_invalidate_attr(inode); | |
1238 | ||
1239 | return res; | |
413ef8cb MS |
1240 | } |
1241 | ||
b98d023a MP |
1242 | static ssize_t fuse_direct_read(struct file *file, char __user *buf, |
1243 | size_t count, loff_t *ppos) | |
1244 | { | |
fb05f41f | 1245 | struct iovec iov = { .iov_base = buf, .iov_len = count }; |
b98d023a MP |
1246 | return __fuse_direct_read(file, &iov, 1, ppos); |
1247 | } | |
1248 | ||
1249 | static ssize_t __fuse_direct_write(struct file *file, const struct iovec *iov, | |
1250 | unsigned long nr_segs, loff_t *ppos) | |
413ef8cb | 1251 | { |
6131ffaa | 1252 | struct inode *inode = file_inode(file); |
b98d023a | 1253 | size_t count = iov_length(iov, nr_segs); |
413ef8cb | 1254 | ssize_t res; |
d09cb9d7 | 1255 | |
889f7848 | 1256 | res = generic_write_checks(file, ppos, &count, 0); |
d09cb9d7 | 1257 | if (!res) { |
fb05f41f | 1258 | res = fuse_direct_io(file, iov, nr_segs, count, ppos, 1); |
d09cb9d7 MS |
1259 | if (res > 0) |
1260 | fuse_write_update_size(inode, *ppos); | |
1261 | } | |
d09cb9d7 MS |
1262 | |
1263 | fuse_invalidate_attr(inode); | |
1264 | ||
413ef8cb MS |
1265 | return res; |
1266 | } | |
1267 | ||
4273b793 AA |
1268 | static ssize_t fuse_direct_write(struct file *file, const char __user *buf, |
1269 | size_t count, loff_t *ppos) | |
1270 | { | |
fb05f41f | 1271 | struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; |
6131ffaa | 1272 | struct inode *inode = file_inode(file); |
4273b793 AA |
1273 | ssize_t res; |
1274 | ||
1275 | if (is_bad_inode(inode)) | |
1276 | return -EIO; | |
1277 | ||
1278 | /* Don't allow parallel writes to the same file */ | |
1279 | mutex_lock(&inode->i_mutex); | |
b98d023a | 1280 | res = __fuse_direct_write(file, &iov, 1, ppos); |
4273b793 AA |
1281 | mutex_unlock(&inode->i_mutex); |
1282 | ||
1283 | return res; | |
1284 | } | |
1285 | ||
3be5a52b | 1286 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
b6aeaded | 1287 | { |
3be5a52b | 1288 | __free_page(req->pages[0]); |
5a18ec17 | 1289 | fuse_file_put(req->ff, false); |
3be5a52b MS |
1290 | } |
1291 | ||
1292 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | |
1293 | { | |
1294 | struct inode *inode = req->inode; | |
1295 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1296 | struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; | |
1297 | ||
1298 | list_del(&req->writepages_entry); | |
1299 | dec_bdi_stat(bdi, BDI_WRITEBACK); | |
1300 | dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP); | |
1301 | bdi_writeout_inc(bdi); | |
1302 | wake_up(&fi->page_waitq); | |
1303 | } | |
1304 | ||
1305 | /* Called under fc->lock, may release and reacquire it */ | |
1306 | static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) | |
b9ca67b2 MS |
1307 | __releases(fc->lock) |
1308 | __acquires(fc->lock) | |
3be5a52b MS |
1309 | { |
1310 | struct fuse_inode *fi = get_fuse_inode(req->inode); | |
1311 | loff_t size = i_size_read(req->inode); | |
1312 | struct fuse_write_in *inarg = &req->misc.write.in; | |
1313 | ||
1314 | if (!fc->connected) | |
1315 | goto out_free; | |
1316 | ||
1317 | if (inarg->offset + PAGE_CACHE_SIZE <= size) { | |
1318 | inarg->size = PAGE_CACHE_SIZE; | |
1319 | } else if (inarg->offset < size) { | |
1320 | inarg->size = size & (PAGE_CACHE_SIZE - 1); | |
1321 | } else { | |
1322 | /* Got truncated off completely */ | |
1323 | goto out_free; | |
b6aeaded | 1324 | } |
3be5a52b MS |
1325 | |
1326 | req->in.args[1].size = inarg->size; | |
1327 | fi->writectr++; | |
b93f858a | 1328 | fuse_request_send_background_locked(fc, req); |
3be5a52b MS |
1329 | return; |
1330 | ||
1331 | out_free: | |
1332 | fuse_writepage_finish(fc, req); | |
1333 | spin_unlock(&fc->lock); | |
1334 | fuse_writepage_free(fc, req); | |
e9bb09dd | 1335 | fuse_put_request(fc, req); |
3be5a52b | 1336 | spin_lock(&fc->lock); |
b6aeaded MS |
1337 | } |
1338 | ||
3be5a52b MS |
1339 | /* |
1340 | * If fi->writectr is positive (no truncate or fsync going on) send | |
1341 | * all queued writepage requests. | |
1342 | * | |
1343 | * Called with fc->lock | |
1344 | */ | |
1345 | void fuse_flush_writepages(struct inode *inode) | |
b9ca67b2 MS |
1346 | __releases(fc->lock) |
1347 | __acquires(fc->lock) | |
b6aeaded | 1348 | { |
3be5a52b MS |
1349 | struct fuse_conn *fc = get_fuse_conn(inode); |
1350 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1351 | struct fuse_req *req; | |
1352 | ||
1353 | while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { | |
1354 | req = list_entry(fi->queued_writes.next, struct fuse_req, list); | |
1355 | list_del_init(&req->list); | |
1356 | fuse_send_writepage(fc, req); | |
1357 | } | |
1358 | } | |
1359 | ||
1360 | static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) | |
1361 | { | |
1362 | struct inode *inode = req->inode; | |
1363 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1364 | ||
1365 | mapping_set_error(inode->i_mapping, req->out.h.error); | |
1366 | spin_lock(&fc->lock); | |
1367 | fi->writectr--; | |
1368 | fuse_writepage_finish(fc, req); | |
1369 | spin_unlock(&fc->lock); | |
1370 | fuse_writepage_free(fc, req); | |
1371 | } | |
1372 | ||
1373 | static int fuse_writepage_locked(struct page *page) | |
1374 | { | |
1375 | struct address_space *mapping = page->mapping; | |
1376 | struct inode *inode = mapping->host; | |
1377 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1378 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1379 | struct fuse_req *req; | |
1380 | struct fuse_file *ff; | |
1381 | struct page *tmp_page; | |
1382 | ||
1383 | set_page_writeback(page); | |
1384 | ||
4250c066 | 1385 | req = fuse_request_alloc_nofs(1); |
3be5a52b MS |
1386 | if (!req) |
1387 | goto err; | |
1388 | ||
8b41e671 | 1389 | req->background = 1; /* writeback always goes to bg_queue */ |
3be5a52b MS |
1390 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); |
1391 | if (!tmp_page) | |
1392 | goto err_free; | |
1393 | ||
1394 | spin_lock(&fc->lock); | |
1395 | BUG_ON(list_empty(&fi->write_files)); | |
1396 | ff = list_entry(fi->write_files.next, struct fuse_file, write_entry); | |
1397 | req->ff = fuse_file_get(ff); | |
1398 | spin_unlock(&fc->lock); | |
1399 | ||
2106cb18 | 1400 | fuse_write_fill(req, ff, page_offset(page), 0); |
3be5a52b MS |
1401 | |
1402 | copy_highpage(tmp_page, page); | |
2d698b07 | 1403 | req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; |
f4975c67 | 1404 | req->in.argpages = 1; |
3be5a52b MS |
1405 | req->num_pages = 1; |
1406 | req->pages[0] = tmp_page; | |
b2430d75 | 1407 | req->page_descs[0].offset = 0; |
85f40aec | 1408 | req->page_descs[0].length = PAGE_SIZE; |
3be5a52b MS |
1409 | req->end = fuse_writepage_end; |
1410 | req->inode = inode; | |
1411 | ||
1412 | inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); | |
1413 | inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); | |
1414 | end_page_writeback(page); | |
1415 | ||
1416 | spin_lock(&fc->lock); | |
1417 | list_add(&req->writepages_entry, &fi->writepages); | |
1418 | list_add_tail(&req->list, &fi->queued_writes); | |
1419 | fuse_flush_writepages(inode); | |
1420 | spin_unlock(&fc->lock); | |
1421 | ||
1422 | return 0; | |
1423 | ||
1424 | err_free: | |
1425 | fuse_request_free(req); | |
1426 | err: | |
1427 | end_page_writeback(page); | |
1428 | return -ENOMEM; | |
1429 | } | |
1430 | ||
1431 | static int fuse_writepage(struct page *page, struct writeback_control *wbc) | |
1432 | { | |
1433 | int err; | |
1434 | ||
1435 | err = fuse_writepage_locked(page); | |
1436 | unlock_page(page); | |
1437 | ||
1438 | return err; | |
1439 | } | |
1440 | ||
1441 | static int fuse_launder_page(struct page *page) | |
1442 | { | |
1443 | int err = 0; | |
1444 | if (clear_page_dirty_for_io(page)) { | |
1445 | struct inode *inode = page->mapping->host; | |
1446 | err = fuse_writepage_locked(page); | |
1447 | if (!err) | |
1448 | fuse_wait_on_page_writeback(inode, page->index); | |
1449 | } | |
1450 | return err; | |
1451 | } | |
1452 | ||
1453 | /* | |
1454 | * Write back dirty pages now, because there may not be any suitable | |
1455 | * open files later | |
1456 | */ | |
1457 | static void fuse_vma_close(struct vm_area_struct *vma) | |
1458 | { | |
1459 | filemap_write_and_wait(vma->vm_file->f_mapping); | |
1460 | } | |
1461 | ||
1462 | /* | |
1463 | * Wait for writeback against this page to complete before allowing it | |
1464 | * to be marked dirty again, and hence written back again, possibly | |
1465 | * before the previous writepage completed. | |
1466 | * | |
1467 | * Block here, instead of in ->writepage(), so that the userspace fs | |
1468 | * can only block processes actually operating on the filesystem. | |
1469 | * | |
1470 | * Otherwise unprivileged userspace fs would be able to block | |
1471 | * unrelated: | |
1472 | * | |
1473 | * - page migration | |
1474 | * - sync(2) | |
1475 | * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER | |
1476 | */ | |
c2ec175c | 1477 | static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
3be5a52b | 1478 | { |
c2ec175c | 1479 | struct page *page = vmf->page; |
3be5a52b MS |
1480 | /* |
1481 | * Don't use page->mapping as it may become NULL from a | |
1482 | * concurrent truncate. | |
1483 | */ | |
1484 | struct inode *inode = vma->vm_file->f_mapping->host; | |
1485 | ||
1486 | fuse_wait_on_page_writeback(inode, page->index); | |
1487 | return 0; | |
1488 | } | |
1489 | ||
f0f37e2f | 1490 | static const struct vm_operations_struct fuse_file_vm_ops = { |
3be5a52b MS |
1491 | .close = fuse_vma_close, |
1492 | .fault = filemap_fault, | |
1493 | .page_mkwrite = fuse_page_mkwrite, | |
0b173bc4 | 1494 | .remap_pages = generic_file_remap_pages, |
3be5a52b MS |
1495 | }; |
1496 | ||
1497 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) | |
1498 | { | |
1499 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) { | |
6131ffaa | 1500 | struct inode *inode = file_inode(file); |
3be5a52b MS |
1501 | struct fuse_conn *fc = get_fuse_conn(inode); |
1502 | struct fuse_inode *fi = get_fuse_inode(inode); | |
1503 | struct fuse_file *ff = file->private_data; | |
1504 | /* | |
1505 | * file may be written through mmap, so chain it onto the | |
1506 | * inodes's write_file list | |
1507 | */ | |
1508 | spin_lock(&fc->lock); | |
1509 | if (list_empty(&ff->write_entry)) | |
1510 | list_add(&ff->write_entry, &fi->write_files); | |
1511 | spin_unlock(&fc->lock); | |
1512 | } | |
1513 | file_accessed(file); | |
1514 | vma->vm_ops = &fuse_file_vm_ops; | |
b6aeaded MS |
1515 | return 0; |
1516 | } | |
1517 | ||
fc280c96 MS |
1518 | static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma) |
1519 | { | |
1520 | /* Can't provide the coherency needed for MAP_SHARED */ | |
1521 | if (vma->vm_flags & VM_MAYSHARE) | |
1522 | return -ENODEV; | |
1523 | ||
3121bfe7 MS |
1524 | invalidate_inode_pages2(file->f_mapping); |
1525 | ||
fc280c96 MS |
1526 | return generic_file_mmap(file, vma); |
1527 | } | |
1528 | ||
71421259 MS |
1529 | static int convert_fuse_file_lock(const struct fuse_file_lock *ffl, |
1530 | struct file_lock *fl) | |
1531 | { | |
1532 | switch (ffl->type) { | |
1533 | case F_UNLCK: | |
1534 | break; | |
1535 | ||
1536 | case F_RDLCK: | |
1537 | case F_WRLCK: | |
1538 | if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX || | |
1539 | ffl->end < ffl->start) | |
1540 | return -EIO; | |
1541 | ||
1542 | fl->fl_start = ffl->start; | |
1543 | fl->fl_end = ffl->end; | |
1544 | fl->fl_pid = ffl->pid; | |
1545 | break; | |
1546 | ||
1547 | default: | |
1548 | return -EIO; | |
1549 | } | |
1550 | fl->fl_type = ffl->type; | |
1551 | return 0; | |
1552 | } | |
1553 | ||
1554 | static void fuse_lk_fill(struct fuse_req *req, struct file *file, | |
a9ff4f87 MS |
1555 | const struct file_lock *fl, int opcode, pid_t pid, |
1556 | int flock) | |
71421259 | 1557 | { |
6131ffaa | 1558 | struct inode *inode = file_inode(file); |
9c8ef561 | 1559 | struct fuse_conn *fc = get_fuse_conn(inode); |
71421259 MS |
1560 | struct fuse_file *ff = file->private_data; |
1561 | struct fuse_lk_in *arg = &req->misc.lk_in; | |
1562 | ||
1563 | arg->fh = ff->fh; | |
9c8ef561 | 1564 | arg->owner = fuse_lock_owner_id(fc, fl->fl_owner); |
71421259 MS |
1565 | arg->lk.start = fl->fl_start; |
1566 | arg->lk.end = fl->fl_end; | |
1567 | arg->lk.type = fl->fl_type; | |
1568 | arg->lk.pid = pid; | |
a9ff4f87 MS |
1569 | if (flock) |
1570 | arg->lk_flags |= FUSE_LK_FLOCK; | |
71421259 MS |
1571 | req->in.h.opcode = opcode; |
1572 | req->in.h.nodeid = get_node_id(inode); | |
1573 | req->in.numargs = 1; | |
1574 | req->in.args[0].size = sizeof(*arg); | |
1575 | req->in.args[0].value = arg; | |
1576 | } | |
1577 | ||
1578 | static int fuse_getlk(struct file *file, struct file_lock *fl) | |
1579 | { | |
6131ffaa | 1580 | struct inode *inode = file_inode(file); |
71421259 MS |
1581 | struct fuse_conn *fc = get_fuse_conn(inode); |
1582 | struct fuse_req *req; | |
1583 | struct fuse_lk_out outarg; | |
1584 | int err; | |
1585 | ||
b111c8c0 | 1586 | req = fuse_get_req_nopages(fc); |
71421259 MS |
1587 | if (IS_ERR(req)) |
1588 | return PTR_ERR(req); | |
1589 | ||
a9ff4f87 | 1590 | fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0); |
71421259 MS |
1591 | req->out.numargs = 1; |
1592 | req->out.args[0].size = sizeof(outarg); | |
1593 | req->out.args[0].value = &outarg; | |
b93f858a | 1594 | fuse_request_send(fc, req); |
71421259 MS |
1595 | err = req->out.h.error; |
1596 | fuse_put_request(fc, req); | |
1597 | if (!err) | |
1598 | err = convert_fuse_file_lock(&outarg.lk, fl); | |
1599 | ||
1600 | return err; | |
1601 | } | |
1602 | ||
a9ff4f87 | 1603 | static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) |
71421259 | 1604 | { |
6131ffaa | 1605 | struct inode *inode = file_inode(file); |
71421259 MS |
1606 | struct fuse_conn *fc = get_fuse_conn(inode); |
1607 | struct fuse_req *req; | |
1608 | int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; | |
1609 | pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0; | |
1610 | int err; | |
1611 | ||
8fb47a4f | 1612 | if (fl->fl_lmops && fl->fl_lmops->lm_grant) { |
48e90761 MS |
1613 | /* NLM needs asynchronous locks, which we don't support yet */ |
1614 | return -ENOLCK; | |
1615 | } | |
1616 | ||
71421259 MS |
1617 | /* Unlock on close is handled by the flush method */ |
1618 | if (fl->fl_flags & FL_CLOSE) | |
1619 | return 0; | |
1620 | ||
b111c8c0 | 1621 | req = fuse_get_req_nopages(fc); |
71421259 MS |
1622 | if (IS_ERR(req)) |
1623 | return PTR_ERR(req); | |
1624 | ||
a9ff4f87 | 1625 | fuse_lk_fill(req, file, fl, opcode, pid, flock); |
b93f858a | 1626 | fuse_request_send(fc, req); |
71421259 | 1627 | err = req->out.h.error; |
a4d27e75 MS |
1628 | /* locking is restartable */ |
1629 | if (err == -EINTR) | |
1630 | err = -ERESTARTSYS; | |
71421259 MS |
1631 | fuse_put_request(fc, req); |
1632 | return err; | |
1633 | } | |
1634 | ||
1635 | static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl) | |
1636 | { | |
6131ffaa | 1637 | struct inode *inode = file_inode(file); |
71421259 MS |
1638 | struct fuse_conn *fc = get_fuse_conn(inode); |
1639 | int err; | |
1640 | ||
48e90761 MS |
1641 | if (cmd == F_CANCELLK) { |
1642 | err = 0; | |
1643 | } else if (cmd == F_GETLK) { | |
71421259 | 1644 | if (fc->no_lock) { |
9d6a8c5c | 1645 | posix_test_lock(file, fl); |
71421259 MS |
1646 | err = 0; |
1647 | } else | |
1648 | err = fuse_getlk(file, fl); | |
1649 | } else { | |
1650 | if (fc->no_lock) | |
48e90761 | 1651 | err = posix_lock_file(file, fl, NULL); |
71421259 | 1652 | else |
a9ff4f87 | 1653 | err = fuse_setlk(file, fl, 0); |
71421259 MS |
1654 | } |
1655 | return err; | |
1656 | } | |
1657 | ||
a9ff4f87 MS |
1658 | static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) |
1659 | { | |
6131ffaa | 1660 | struct inode *inode = file_inode(file); |
a9ff4f87 MS |
1661 | struct fuse_conn *fc = get_fuse_conn(inode); |
1662 | int err; | |
1663 | ||
37fb3a30 | 1664 | if (fc->no_flock) { |
a9ff4f87 MS |
1665 | err = flock_lock_file_wait(file, fl); |
1666 | } else { | |
37fb3a30 MS |
1667 | struct fuse_file *ff = file->private_data; |
1668 | ||
a9ff4f87 MS |
1669 | /* emulate flock with POSIX locks */ |
1670 | fl->fl_owner = (fl_owner_t) file; | |
37fb3a30 | 1671 | ff->flock = true; |
a9ff4f87 MS |
1672 | err = fuse_setlk(file, fl, 1); |
1673 | } | |
1674 | ||
1675 | return err; | |
1676 | } | |
1677 | ||
b2d2272f MS |
1678 | static sector_t fuse_bmap(struct address_space *mapping, sector_t block) |
1679 | { | |
1680 | struct inode *inode = mapping->host; | |
1681 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1682 | struct fuse_req *req; | |
1683 | struct fuse_bmap_in inarg; | |
1684 | struct fuse_bmap_out outarg; | |
1685 | int err; | |
1686 | ||
1687 | if (!inode->i_sb->s_bdev || fc->no_bmap) | |
1688 | return 0; | |
1689 | ||
b111c8c0 | 1690 | req = fuse_get_req_nopages(fc); |
b2d2272f MS |
1691 | if (IS_ERR(req)) |
1692 | return 0; | |
1693 | ||
1694 | memset(&inarg, 0, sizeof(inarg)); | |
1695 | inarg.block = block; | |
1696 | inarg.blocksize = inode->i_sb->s_blocksize; | |
1697 | req->in.h.opcode = FUSE_BMAP; | |
1698 | req->in.h.nodeid = get_node_id(inode); | |
1699 | req->in.numargs = 1; | |
1700 | req->in.args[0].size = sizeof(inarg); | |
1701 | req->in.args[0].value = &inarg; | |
1702 | req->out.numargs = 1; | |
1703 | req->out.args[0].size = sizeof(outarg); | |
1704 | req->out.args[0].value = &outarg; | |
b93f858a | 1705 | fuse_request_send(fc, req); |
b2d2272f MS |
1706 | err = req->out.h.error; |
1707 | fuse_put_request(fc, req); | |
1708 | if (err == -ENOSYS) | |
1709 | fc->no_bmap = 1; | |
1710 | ||
1711 | return err ? 0 : outarg.block; | |
1712 | } | |
1713 | ||
965c8e59 | 1714 | static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence) |
5559b8f4 MS |
1715 | { |
1716 | loff_t retval; | |
6131ffaa | 1717 | struct inode *inode = file_inode(file); |
5559b8f4 | 1718 | |
c07c3d19 | 1719 | /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ |
965c8e59 AM |
1720 | if (whence == SEEK_CUR || whence == SEEK_SET) |
1721 | return generic_file_llseek(file, offset, whence); | |
06222e49 | 1722 | |
c07c3d19 MS |
1723 | mutex_lock(&inode->i_mutex); |
1724 | retval = fuse_update_attributes(inode, NULL, file, NULL); | |
1725 | if (!retval) | |
965c8e59 | 1726 | retval = generic_file_llseek(file, offset, whence); |
5559b8f4 | 1727 | mutex_unlock(&inode->i_mutex); |
c07c3d19 | 1728 | |
5559b8f4 MS |
1729 | return retval; |
1730 | } | |
1731 | ||
59efec7b TH |
1732 | static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, |
1733 | unsigned int nr_segs, size_t bytes, bool to_user) | |
1734 | { | |
1735 | struct iov_iter ii; | |
1736 | int page_idx = 0; | |
1737 | ||
1738 | if (!bytes) | |
1739 | return 0; | |
1740 | ||
1741 | iov_iter_init(&ii, iov, nr_segs, bytes, 0); | |
1742 | ||
1743 | while (iov_iter_count(&ii)) { | |
1744 | struct page *page = pages[page_idx++]; | |
1745 | size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); | |
4aa0edd2 | 1746 | void *kaddr; |
59efec7b | 1747 | |
4aa0edd2 | 1748 | kaddr = kmap(page); |
59efec7b TH |
1749 | |
1750 | while (todo) { | |
1751 | char __user *uaddr = ii.iov->iov_base + ii.iov_offset; | |
1752 | size_t iov_len = ii.iov->iov_len - ii.iov_offset; | |
1753 | size_t copy = min(todo, iov_len); | |
1754 | size_t left; | |
1755 | ||
1756 | if (!to_user) | |
1757 | left = copy_from_user(kaddr, uaddr, copy); | |
1758 | else | |
1759 | left = copy_to_user(uaddr, kaddr, copy); | |
1760 | ||
1761 | if (unlikely(left)) | |
1762 | return -EFAULT; | |
1763 | ||
1764 | iov_iter_advance(&ii, copy); | |
1765 | todo -= copy; | |
1766 | kaddr += copy; | |
1767 | } | |
1768 | ||
0bd87182 | 1769 | kunmap(page); |
59efec7b TH |
1770 | } |
1771 | ||
1772 | return 0; | |
1773 | } | |
1774 | ||
d9d318d3 MS |
1775 | /* |
1776 | * CUSE servers compiled on 32bit broke on 64bit kernels because the | |
1777 | * ABI was defined to be 'struct iovec' which is different on 32bit | |
1778 | * and 64bit. Fortunately we can determine which structure the server | |
1779 | * used from the size of the reply. | |
1780 | */ | |
1baa26b2 MS |
1781 | static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, |
1782 | size_t transferred, unsigned count, | |
1783 | bool is_compat) | |
d9d318d3 MS |
1784 | { |
1785 | #ifdef CONFIG_COMPAT | |
1786 | if (count * sizeof(struct compat_iovec) == transferred) { | |
1787 | struct compat_iovec *ciov = src; | |
1788 | unsigned i; | |
1789 | ||
1790 | /* | |
1791 | * With this interface a 32bit server cannot support | |
1792 | * non-compat (i.e. ones coming from 64bit apps) ioctl | |
1793 | * requests | |
1794 | */ | |
1795 | if (!is_compat) | |
1796 | return -EINVAL; | |
1797 | ||
1798 | for (i = 0; i < count; i++) { | |
1799 | dst[i].iov_base = compat_ptr(ciov[i].iov_base); | |
1800 | dst[i].iov_len = ciov[i].iov_len; | |
1801 | } | |
1802 | return 0; | |
1803 | } | |
1804 | #endif | |
1805 | ||
1806 | if (count * sizeof(struct iovec) != transferred) | |
1807 | return -EIO; | |
1808 | ||
1809 | memcpy(dst, src, transferred); | |
1810 | return 0; | |
1811 | } | |
1812 | ||
7572777e MS |
1813 | /* Make sure iov_length() won't overflow */ |
1814 | static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) | |
1815 | { | |
1816 | size_t n; | |
1817 | u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; | |
1818 | ||
fb6ccff6 | 1819 | for (n = 0; n < count; n++, iov++) { |
7572777e MS |
1820 | if (iov->iov_len > (size_t) max) |
1821 | return -ENOMEM; | |
1822 | max -= iov->iov_len; | |
1823 | } | |
1824 | return 0; | |
1825 | } | |
1826 | ||
1baa26b2 MS |
1827 | static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, |
1828 | void *src, size_t transferred, unsigned count, | |
1829 | bool is_compat) | |
1830 | { | |
1831 | unsigned i; | |
1832 | struct fuse_ioctl_iovec *fiov = src; | |
1833 | ||
1834 | if (fc->minor < 16) { | |
1835 | return fuse_copy_ioctl_iovec_old(dst, src, transferred, | |
1836 | count, is_compat); | |
1837 | } | |
1838 | ||
1839 | if (count * sizeof(struct fuse_ioctl_iovec) != transferred) | |
1840 | return -EIO; | |
1841 | ||
1842 | for (i = 0; i < count; i++) { | |
1843 | /* Did the server supply an inappropriate value? */ | |
1844 | if (fiov[i].base != (unsigned long) fiov[i].base || | |
1845 | fiov[i].len != (unsigned long) fiov[i].len) | |
1846 | return -EIO; | |
1847 | ||
1848 | dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; | |
1849 | dst[i].iov_len = (size_t) fiov[i].len; | |
1850 | ||
1851 | #ifdef CONFIG_COMPAT | |
1852 | if (is_compat && | |
1853 | (ptr_to_compat(dst[i].iov_base) != fiov[i].base || | |
1854 | (compat_size_t) dst[i].iov_len != fiov[i].len)) | |
1855 | return -EIO; | |
1856 | #endif | |
1857 | } | |
1858 | ||
1859 | return 0; | |
1860 | } | |
1861 | ||
1862 | ||
59efec7b TH |
1863 | /* |
1864 | * For ioctls, there is no generic way to determine how much memory | |
1865 | * needs to be read and/or written. Furthermore, ioctls are allowed | |
1866 | * to dereference the passed pointer, so the parameter requires deep | |
1867 | * copying but FUSE has no idea whatsoever about what to copy in or | |
1868 | * out. | |
1869 | * | |
1870 | * This is solved by allowing FUSE server to retry ioctl with | |
1871 | * necessary in/out iovecs. Let's assume the ioctl implementation | |
1872 | * needs to read in the following structure. | |
1873 | * | |
1874 | * struct a { | |
1875 | * char *buf; | |
1876 | * size_t buflen; | |
1877 | * } | |
1878 | * | |
1879 | * On the first callout to FUSE server, inarg->in_size and | |
1880 | * inarg->out_size will be NULL; then, the server completes the ioctl | |
1881 | * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and | |
1882 | * the actual iov array to | |
1883 | * | |
1884 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } | |
1885 | * | |
1886 | * which tells FUSE to copy in the requested area and retry the ioctl. | |
1887 | * On the second round, the server has access to the structure and | |
1888 | * from that it can tell what to look for next, so on the invocation, | |
1889 | * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to | |
1890 | * | |
1891 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, | |
1892 | * { .iov_base = a.buf, .iov_len = a.buflen } } | |
1893 | * | |
1894 | * FUSE will copy both struct a and the pointed buffer from the | |
1895 | * process doing the ioctl and retry ioctl with both struct a and the | |
1896 | * buffer. | |
1897 | * | |
1898 | * This time, FUSE server has everything it needs and completes ioctl | |
1899 | * without FUSE_IOCTL_RETRY which finishes the ioctl call. | |
1900 | * | |
1901 | * Copying data out works the same way. | |
1902 | * | |
1903 | * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel | |
1904 | * automatically initializes in and out iovs by decoding @cmd with | |
1905 | * _IOC_* macros and the server is not allowed to request RETRY. This | |
1906 | * limits ioctl data transfers to well-formed ioctls and is the forced | |
1907 | * behavior for all FUSE servers. | |
1908 | */ | |
08cbf542 TH |
1909 | long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, |
1910 | unsigned int flags) | |
59efec7b | 1911 | { |
59efec7b | 1912 | struct fuse_file *ff = file->private_data; |
d36f2487 | 1913 | struct fuse_conn *fc = ff->fc; |
59efec7b TH |
1914 | struct fuse_ioctl_in inarg = { |
1915 | .fh = ff->fh, | |
1916 | .cmd = cmd, | |
1917 | .arg = arg, | |
1918 | .flags = flags | |
1919 | }; | |
1920 | struct fuse_ioctl_out outarg; | |
1921 | struct fuse_req *req = NULL; | |
1922 | struct page **pages = NULL; | |
8ac83505 | 1923 | struct iovec *iov_page = NULL; |
59efec7b TH |
1924 | struct iovec *in_iov = NULL, *out_iov = NULL; |
1925 | unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; | |
1926 | size_t in_size, out_size, transferred; | |
1927 | int err; | |
1928 | ||
1baa26b2 MS |
1929 | #if BITS_PER_LONG == 32 |
1930 | inarg.flags |= FUSE_IOCTL_32BIT; | |
1931 | #else | |
1932 | if (flags & FUSE_IOCTL_COMPAT) | |
1933 | inarg.flags |= FUSE_IOCTL_32BIT; | |
1934 | #endif | |
1935 | ||
59efec7b | 1936 | /* assume all the iovs returned by client always fits in a page */ |
1baa26b2 | 1937 | BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); |
59efec7b | 1938 | |
59efec7b | 1939 | err = -ENOMEM; |
c411cc88 | 1940 | pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); |
8ac83505 | 1941 | iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); |
59efec7b TH |
1942 | if (!pages || !iov_page) |
1943 | goto out; | |
1944 | ||
1945 | /* | |
1946 | * If restricted, initialize IO parameters as encoded in @cmd. | |
1947 | * RETRY from server is not allowed. | |
1948 | */ | |
1949 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { | |
8ac83505 | 1950 | struct iovec *iov = iov_page; |
59efec7b | 1951 | |
c9f0523d | 1952 | iov->iov_base = (void __user *)arg; |
59efec7b TH |
1953 | iov->iov_len = _IOC_SIZE(cmd); |
1954 | ||
1955 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | |
1956 | in_iov = iov; | |
1957 | in_iovs = 1; | |
1958 | } | |
1959 | ||
1960 | if (_IOC_DIR(cmd) & _IOC_READ) { | |
1961 | out_iov = iov; | |
1962 | out_iovs = 1; | |
1963 | } | |
1964 | } | |
1965 | ||
1966 | retry: | |
1967 | inarg.in_size = in_size = iov_length(in_iov, in_iovs); | |
1968 | inarg.out_size = out_size = iov_length(out_iov, out_iovs); | |
1969 | ||
1970 | /* | |
1971 | * Out data can be used either for actual out data or iovs, | |
1972 | * make sure there always is at least one page. | |
1973 | */ | |
1974 | out_size = max_t(size_t, out_size, PAGE_SIZE); | |
1975 | max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); | |
1976 | ||
1977 | /* make sure there are enough buffer pages and init request with them */ | |
1978 | err = -ENOMEM; | |
1979 | if (max_pages > FUSE_MAX_PAGES_PER_REQ) | |
1980 | goto out; | |
1981 | while (num_pages < max_pages) { | |
1982 | pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | |
1983 | if (!pages[num_pages]) | |
1984 | goto out; | |
1985 | num_pages++; | |
1986 | } | |
1987 | ||
54b96670 | 1988 | req = fuse_get_req(fc, num_pages); |
59efec7b TH |
1989 | if (IS_ERR(req)) { |
1990 | err = PTR_ERR(req); | |
1991 | req = NULL; | |
1992 | goto out; | |
1993 | } | |
1994 | memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); | |
1995 | req->num_pages = num_pages; | |
7c190c8b | 1996 | fuse_page_descs_length_init(req, 0, req->num_pages); |
59efec7b TH |
1997 | |
1998 | /* okay, let's send it to the client */ | |
1999 | req->in.h.opcode = FUSE_IOCTL; | |
d36f2487 | 2000 | req->in.h.nodeid = ff->nodeid; |
59efec7b TH |
2001 | req->in.numargs = 1; |
2002 | req->in.args[0].size = sizeof(inarg); | |
2003 | req->in.args[0].value = &inarg; | |
2004 | if (in_size) { | |
2005 | req->in.numargs++; | |
2006 | req->in.args[1].size = in_size; | |
2007 | req->in.argpages = 1; | |
2008 | ||
2009 | err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, | |
2010 | false); | |
2011 | if (err) | |
2012 | goto out; | |
2013 | } | |
2014 | ||
2015 | req->out.numargs = 2; | |
2016 | req->out.args[0].size = sizeof(outarg); | |
2017 | req->out.args[0].value = &outarg; | |
2018 | req->out.args[1].size = out_size; | |
2019 | req->out.argpages = 1; | |
2020 | req->out.argvar = 1; | |
2021 | ||
b93f858a | 2022 | fuse_request_send(fc, req); |
59efec7b TH |
2023 | err = req->out.h.error; |
2024 | transferred = req->out.args[1].size; | |
2025 | fuse_put_request(fc, req); | |
2026 | req = NULL; | |
2027 | if (err) | |
2028 | goto out; | |
2029 | ||
2030 | /* did it ask for retry? */ | |
2031 | if (outarg.flags & FUSE_IOCTL_RETRY) { | |
8ac83505 | 2032 | void *vaddr; |
59efec7b TH |
2033 | |
2034 | /* no retry if in restricted mode */ | |
2035 | err = -EIO; | |
2036 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) | |
2037 | goto out; | |
2038 | ||
2039 | in_iovs = outarg.in_iovs; | |
2040 | out_iovs = outarg.out_iovs; | |
2041 | ||
2042 | /* | |
2043 | * Make sure things are in boundary, separate checks | |
2044 | * are to protect against overflow. | |
2045 | */ | |
2046 | err = -ENOMEM; | |
2047 | if (in_iovs > FUSE_IOCTL_MAX_IOV || | |
2048 | out_iovs > FUSE_IOCTL_MAX_IOV || | |
2049 | in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) | |
2050 | goto out; | |
2051 | ||
2408f6ef | 2052 | vaddr = kmap_atomic(pages[0]); |
1baa26b2 | 2053 | err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, |
d9d318d3 MS |
2054 | transferred, in_iovs + out_iovs, |
2055 | (flags & FUSE_IOCTL_COMPAT) != 0); | |
2408f6ef | 2056 | kunmap_atomic(vaddr); |
d9d318d3 MS |
2057 | if (err) |
2058 | goto out; | |
59efec7b | 2059 | |
8ac83505 | 2060 | in_iov = iov_page; |
59efec7b TH |
2061 | out_iov = in_iov + in_iovs; |
2062 | ||
7572777e MS |
2063 | err = fuse_verify_ioctl_iov(in_iov, in_iovs); |
2064 | if (err) | |
2065 | goto out; | |
2066 | ||
2067 | err = fuse_verify_ioctl_iov(out_iov, out_iovs); | |
2068 | if (err) | |
2069 | goto out; | |
2070 | ||
59efec7b TH |
2071 | goto retry; |
2072 | } | |
2073 | ||
2074 | err = -EIO; | |
2075 | if (transferred > inarg.out_size) | |
2076 | goto out; | |
2077 | ||
2078 | err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); | |
2079 | out: | |
2080 | if (req) | |
2081 | fuse_put_request(fc, req); | |
8ac83505 | 2082 | free_page((unsigned long) iov_page); |
59efec7b TH |
2083 | while (num_pages) |
2084 | __free_page(pages[--num_pages]); | |
2085 | kfree(pages); | |
2086 | ||
2087 | return err ? err : outarg.result; | |
2088 | } | |
08cbf542 | 2089 | EXPORT_SYMBOL_GPL(fuse_do_ioctl); |
59efec7b | 2090 | |
b18da0c5 MS |
2091 | long fuse_ioctl_common(struct file *file, unsigned int cmd, |
2092 | unsigned long arg, unsigned int flags) | |
d36f2487 | 2093 | { |
6131ffaa | 2094 | struct inode *inode = file_inode(file); |
d36f2487 MS |
2095 | struct fuse_conn *fc = get_fuse_conn(inode); |
2096 | ||
c2132c1b | 2097 | if (!fuse_allow_current_process(fc)) |
d36f2487 MS |
2098 | return -EACCES; |
2099 | ||
2100 | if (is_bad_inode(inode)) | |
2101 | return -EIO; | |
2102 | ||
2103 | return fuse_do_ioctl(file, cmd, arg, flags); | |
2104 | } | |
2105 | ||
59efec7b TH |
2106 | static long fuse_file_ioctl(struct file *file, unsigned int cmd, |
2107 | unsigned long arg) | |
2108 | { | |
b18da0c5 | 2109 | return fuse_ioctl_common(file, cmd, arg, 0); |
59efec7b TH |
2110 | } |
2111 | ||
2112 | static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, | |
2113 | unsigned long arg) | |
2114 | { | |
b18da0c5 | 2115 | return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); |
59efec7b TH |
2116 | } |
2117 | ||
95668a69 TH |
2118 | /* |
2119 | * All files which have been polled are linked to RB tree | |
2120 | * fuse_conn->polled_files which is indexed by kh. Walk the tree and | |
2121 | * find the matching one. | |
2122 | */ | |
2123 | static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, | |
2124 | struct rb_node **parent_out) | |
2125 | { | |
2126 | struct rb_node **link = &fc->polled_files.rb_node; | |
2127 | struct rb_node *last = NULL; | |
2128 | ||
2129 | while (*link) { | |
2130 | struct fuse_file *ff; | |
2131 | ||
2132 | last = *link; | |
2133 | ff = rb_entry(last, struct fuse_file, polled_node); | |
2134 | ||
2135 | if (kh < ff->kh) | |
2136 | link = &last->rb_left; | |
2137 | else if (kh > ff->kh) | |
2138 | link = &last->rb_right; | |
2139 | else | |
2140 | return link; | |
2141 | } | |
2142 | ||
2143 | if (parent_out) | |
2144 | *parent_out = last; | |
2145 | return link; | |
2146 | } | |
2147 | ||
2148 | /* | |
2149 | * The file is about to be polled. Make sure it's on the polled_files | |
2150 | * RB tree. Note that files once added to the polled_files tree are | |
2151 | * not removed before the file is released. This is because a file | |
2152 | * polled once is likely to be polled again. | |
2153 | */ | |
2154 | static void fuse_register_polled_file(struct fuse_conn *fc, | |
2155 | struct fuse_file *ff) | |
2156 | { | |
2157 | spin_lock(&fc->lock); | |
2158 | if (RB_EMPTY_NODE(&ff->polled_node)) { | |
2159 | struct rb_node **link, *parent; | |
2160 | ||
2161 | link = fuse_find_polled_node(fc, ff->kh, &parent); | |
2162 | BUG_ON(*link); | |
2163 | rb_link_node(&ff->polled_node, parent, link); | |
2164 | rb_insert_color(&ff->polled_node, &fc->polled_files); | |
2165 | } | |
2166 | spin_unlock(&fc->lock); | |
2167 | } | |
2168 | ||
08cbf542 | 2169 | unsigned fuse_file_poll(struct file *file, poll_table *wait) |
95668a69 | 2170 | { |
95668a69 | 2171 | struct fuse_file *ff = file->private_data; |
797759aa | 2172 | struct fuse_conn *fc = ff->fc; |
95668a69 TH |
2173 | struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; |
2174 | struct fuse_poll_out outarg; | |
2175 | struct fuse_req *req; | |
2176 | int err; | |
2177 | ||
2178 | if (fc->no_poll) | |
2179 | return DEFAULT_POLLMASK; | |
2180 | ||
2181 | poll_wait(file, &ff->poll_wait, wait); | |
0415d291 | 2182 | inarg.events = (__u32)poll_requested_events(wait); |
95668a69 TH |
2183 | |
2184 | /* | |
2185 | * Ask for notification iff there's someone waiting for it. | |
2186 | * The client may ignore the flag and always notify. | |
2187 | */ | |
2188 | if (waitqueue_active(&ff->poll_wait)) { | |
2189 | inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; | |
2190 | fuse_register_polled_file(fc, ff); | |
2191 | } | |
2192 | ||
b111c8c0 | 2193 | req = fuse_get_req_nopages(fc); |
95668a69 | 2194 | if (IS_ERR(req)) |
201fa69a | 2195 | return POLLERR; |
95668a69 TH |
2196 | |
2197 | req->in.h.opcode = FUSE_POLL; | |
797759aa | 2198 | req->in.h.nodeid = ff->nodeid; |
95668a69 TH |
2199 | req->in.numargs = 1; |
2200 | req->in.args[0].size = sizeof(inarg); | |
2201 | req->in.args[0].value = &inarg; | |
2202 | req->out.numargs = 1; | |
2203 | req->out.args[0].size = sizeof(outarg); | |
2204 | req->out.args[0].value = &outarg; | |
b93f858a | 2205 | fuse_request_send(fc, req); |
95668a69 TH |
2206 | err = req->out.h.error; |
2207 | fuse_put_request(fc, req); | |
2208 | ||
2209 | if (!err) | |
2210 | return outarg.revents; | |
2211 | if (err == -ENOSYS) { | |
2212 | fc->no_poll = 1; | |
2213 | return DEFAULT_POLLMASK; | |
2214 | } | |
2215 | return POLLERR; | |
2216 | } | |
08cbf542 | 2217 | EXPORT_SYMBOL_GPL(fuse_file_poll); |
95668a69 TH |
2218 | |
2219 | /* | |
2220 | * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and | |
2221 | * wakes up the poll waiters. | |
2222 | */ | |
2223 | int fuse_notify_poll_wakeup(struct fuse_conn *fc, | |
2224 | struct fuse_notify_poll_wakeup_out *outarg) | |
2225 | { | |
2226 | u64 kh = outarg->kh; | |
2227 | struct rb_node **link; | |
2228 | ||
2229 | spin_lock(&fc->lock); | |
2230 | ||
2231 | link = fuse_find_polled_node(fc, kh, NULL); | |
2232 | if (*link) { | |
2233 | struct fuse_file *ff; | |
2234 | ||
2235 | ff = rb_entry(*link, struct fuse_file, polled_node); | |
2236 | wake_up_interruptible_sync(&ff->poll_wait); | |
2237 | } | |
2238 | ||
2239 | spin_unlock(&fc->lock); | |
2240 | return 0; | |
2241 | } | |
2242 | ||
4273b793 AA |
2243 | static ssize_t |
2244 | fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |
2245 | loff_t offset, unsigned long nr_segs) | |
2246 | { | |
2247 | ssize_t ret = 0; | |
2248 | struct file *file = NULL; | |
2249 | loff_t pos = 0; | |
2250 | ||
2251 | file = iocb->ki_filp; | |
2252 | pos = offset; | |
2253 | ||
b98d023a MP |
2254 | if (rw == WRITE) |
2255 | ret = __fuse_direct_write(file, iov, nr_segs, &pos); | |
2256 | else | |
2257 | ret = __fuse_direct_read(file, iov, nr_segs, &pos); | |
4273b793 AA |
2258 | |
2259 | return ret; | |
2260 | } | |
2261 | ||
cdadb11c MS |
2262 | static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
2263 | loff_t length) | |
05ba1f08 AP |
2264 | { |
2265 | struct fuse_file *ff = file->private_data; | |
2266 | struct fuse_conn *fc = ff->fc; | |
2267 | struct fuse_req *req; | |
2268 | struct fuse_fallocate_in inarg = { | |
2269 | .fh = ff->fh, | |
2270 | .offset = offset, | |
2271 | .length = length, | |
2272 | .mode = mode | |
2273 | }; | |
2274 | int err; | |
2275 | ||
519c6040 MS |
2276 | if (fc->no_fallocate) |
2277 | return -EOPNOTSUPP; | |
2278 | ||
b111c8c0 | 2279 | req = fuse_get_req_nopages(fc); |
05ba1f08 AP |
2280 | if (IS_ERR(req)) |
2281 | return PTR_ERR(req); | |
2282 | ||
2283 | req->in.h.opcode = FUSE_FALLOCATE; | |
2284 | req->in.h.nodeid = ff->nodeid; | |
2285 | req->in.numargs = 1; | |
2286 | req->in.args[0].size = sizeof(inarg); | |
2287 | req->in.args[0].value = &inarg; | |
2288 | fuse_request_send(fc, req); | |
2289 | err = req->out.h.error; | |
519c6040 MS |
2290 | if (err == -ENOSYS) { |
2291 | fc->no_fallocate = 1; | |
2292 | err = -EOPNOTSUPP; | |
2293 | } | |
05ba1f08 AP |
2294 | fuse_put_request(fc, req); |
2295 | ||
2296 | return err; | |
2297 | } | |
05ba1f08 | 2298 | |
4b6f5d20 | 2299 | static const struct file_operations fuse_file_operations = { |
5559b8f4 | 2300 | .llseek = fuse_file_llseek, |
543ade1f | 2301 | .read = do_sync_read, |
bcb4be80 | 2302 | .aio_read = fuse_file_aio_read, |
543ade1f | 2303 | .write = do_sync_write, |
ea9b9907 | 2304 | .aio_write = fuse_file_aio_write, |
b6aeaded MS |
2305 | .mmap = fuse_file_mmap, |
2306 | .open = fuse_open, | |
2307 | .flush = fuse_flush, | |
2308 | .release = fuse_release, | |
2309 | .fsync = fuse_fsync, | |
71421259 | 2310 | .lock = fuse_file_lock, |
a9ff4f87 | 2311 | .flock = fuse_file_flock, |
5ffc4ef4 | 2312 | .splice_read = generic_file_splice_read, |
59efec7b TH |
2313 | .unlocked_ioctl = fuse_file_ioctl, |
2314 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2315 | .poll = fuse_file_poll, |
05ba1f08 | 2316 | .fallocate = fuse_file_fallocate, |
b6aeaded MS |
2317 | }; |
2318 | ||
4b6f5d20 | 2319 | static const struct file_operations fuse_direct_io_file_operations = { |
5559b8f4 | 2320 | .llseek = fuse_file_llseek, |
413ef8cb MS |
2321 | .read = fuse_direct_read, |
2322 | .write = fuse_direct_write, | |
fc280c96 | 2323 | .mmap = fuse_direct_mmap, |
413ef8cb MS |
2324 | .open = fuse_open, |
2325 | .flush = fuse_flush, | |
2326 | .release = fuse_release, | |
2327 | .fsync = fuse_fsync, | |
71421259 | 2328 | .lock = fuse_file_lock, |
a9ff4f87 | 2329 | .flock = fuse_file_flock, |
59efec7b TH |
2330 | .unlocked_ioctl = fuse_file_ioctl, |
2331 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2332 | .poll = fuse_file_poll, |
05ba1f08 | 2333 | .fallocate = fuse_file_fallocate, |
fc280c96 | 2334 | /* no splice_read */ |
413ef8cb MS |
2335 | }; |
2336 | ||
f5e54d6e | 2337 | static const struct address_space_operations fuse_file_aops = { |
b6aeaded | 2338 | .readpage = fuse_readpage, |
3be5a52b MS |
2339 | .writepage = fuse_writepage, |
2340 | .launder_page = fuse_launder_page, | |
db50b96c | 2341 | .readpages = fuse_readpages, |
3be5a52b | 2342 | .set_page_dirty = __set_page_dirty_nobuffers, |
b2d2272f | 2343 | .bmap = fuse_bmap, |
4273b793 | 2344 | .direct_IO = fuse_direct_IO, |
b6aeaded MS |
2345 | }; |
2346 | ||
2347 | void fuse_init_file_inode(struct inode *inode) | |
2348 | { | |
45323fb7 MS |
2349 | inode->i_fop = &fuse_file_operations; |
2350 | inode->i_data.a_ops = &fuse_file_aops; | |
b6aeaded | 2351 | } |