/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>

static const struct file_operations fuse_direct_io_file_operations;
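
/*
 * Send an OPEN or OPENDIR request and return the server's reply
 * (file handle and open flags) in *outargp.
 */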
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
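
/*
 * Allocate a fuse_file together with a pre-allocated request that
 * will later carry its RELEASE, so that releasing a file can never
 * fail with -ENOMEM.
 */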
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}
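
/*
 * Drop a reference to a fuse_file.  The final reference sends the
 * prepared RELEASE request: synchronously if sync is true, otherwise
 * as a background request completed by fuse_release_end().
 */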
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}
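
/*
 * Open a file or directory: allocate the fuse_file, send the OPEN
 * request, and on success stash the fuse_file in file->private_data.
 */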
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
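
/*
 * Act on the open flags returned by the server: switch to direct-I/O
 * operations, drop cached pages unless FOPEN_KEEP_CACHE, mark the
 * file nonseekable, and zero i_size for atomic O_TRUNC opens.
 */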
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}
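
/*
 * Fill in the RELEASE request stored in ff->reserved_req and detach
 * the file from the write and poll bookkeeping.  The request is not
 * sent here; that is up to the caller.
 */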
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
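
/*
 * Common part of release for files and directories: prepare the
 * RELEASE request, record flock-unlock info if flocks were used, pin
 * the path until the request completes, and drop the open reference.
 */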
void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount;
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
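
/*
 * Send FUSE_FLUSH on close(2).  Uses the nofail request allocator and
 * sets req->force, so the flush cannot be skipped due to allocation
 * failure or pending signals.
 */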
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
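
/*
 * fsync for both files and directories: flush dirty pages, wait for
 * in-flight writepages, then send FSYNC or FSYNCDIR.  ENOSYS from the
 * server disables the operation for the rest of the connection.
 */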
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}
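
/*
 * A short read indicates EOF; shrink the cached i_size accordingly,
 * but only if the attributes haven't been updated since the read was
 * issued (checked via attr_ver).
 */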
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
out:
	unlock_page(page);
	return err;
}
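
/*
 * Completion handler for readpages requests: apply EOF shortening to
 * i_size, then mark each page up to date (or in error) and unlock it.
 */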
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}
		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
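
/*
 * Copy as much of the iovec as fits into one WRITE request, grabbing
 * page-cache pages as it goes.  Returns the number of bytes gathered,
 * or an error if nothing could be copied.
 */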
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
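
/*
 * Buffered write path: repeatedly fill a request's pages from the
 * iovec and send one WRITE per batch, stopping on error or short
 * write, then update i_size for any extension.
 */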
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;
	loff_t endbyte = 0;

	WARN_ON(iocb->ki_pos != pos);

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs,
						    pos, &iocb->ki_pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
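
/*
 * Pin the pages backing the user buffer for direct I/O.  Kernel
 * callers (KERNEL_DS) skip pinning and pass the buffer pointer
 * through; *nbytesp is trimmed to what the pinned pages can hold.
 */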
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}
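
/*
 * Direct I/O engine for reads and writes that bypass the page cache:
 * transfer the buffer in max_read/max_write sized chunks, one request
 * per chunk, stopping on error or a short transfer.
 */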
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t __fuse_direct_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = __fuse_direct_write(file, buf, count, ppos);
	mutex_unlock(&inode->i_mutex);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
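
/*
 * Write back one page.  The data is copied to a temporary page so the
 * original can be released right away; the WRITE request is queued on
 * fi->queued_writes and sent when fi->writectr permits.
 */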
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
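
/*
 * FIBMAP support: translate a logical block to a device block with
 * FUSE_BMAP.  Only used on block-device backed (fuseblk) mounts; any
 * failure is reported as "no mapping" (0).
 */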
b2d2272f MS |
1562 | static sector_t fuse_bmap(struct address_space *mapping, sector_t block) |
1563 | { | |
1564 | struct inode *inode = mapping->host; | |
1565 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1566 | struct fuse_req *req; | |
1567 | struct fuse_bmap_in inarg; | |
1568 | struct fuse_bmap_out outarg; | |
1569 | int err; | |
1570 | ||
1571 | if (!inode->i_sb->s_bdev || fc->no_bmap) | |
1572 | return 0; | |
1573 | ||
1574 | req = fuse_get_req(fc); | |
1575 | if (IS_ERR(req)) | |
1576 | return 0; | |
1577 | ||
1578 | memset(&inarg, 0, sizeof(inarg)); | |
1579 | inarg.block = block; | |
1580 | inarg.blocksize = inode->i_sb->s_blocksize; | |
1581 | req->in.h.opcode = FUSE_BMAP; | |
1582 | req->in.h.nodeid = get_node_id(inode); | |
1583 | req->in.numargs = 1; | |
1584 | req->in.args[0].size = sizeof(inarg); | |
1585 | req->in.args[0].value = &inarg; | |
1586 | req->out.numargs = 1; | |
1587 | req->out.args[0].size = sizeof(outarg); | |
1588 | req->out.args[0].value = &outarg; | |
b93f858a | 1589 | fuse_request_send(fc, req); |
b2d2272f MS |
1590 | err = req->out.h.error; |
1591 | fuse_put_request(fc, req); | |
1592 | if (err == -ENOSYS) | |
1593 | fc->no_bmap = 1; | |
1594 | ||
1595 | return err ? 0 : outarg.block; | |
1596 | } | |
1597 | ||
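FUSE_BMAP is only issued for filesystems mounted over a block device (the s_bdev check above), and an -ENOSYS reply sets no_bmap so the kernel never asks again. On the server side the request surfaces as the high-level libfuse .bmap operation; below is a minimal sketch, assuming libfuse 2.x and a filesystem whose logical blocks map one-to-one onto the device (purely illustrative):

    #define FUSE_USE_VERSION 26
    #include <fuse.h>
    #include <stdint.h>
    #include <stddef.h>

    /* *idx carries the logical block on entry; store the physical
     * block before returning. An error reply makes the kernel's
     * fuse_bmap() report 0, i.e. "no mapping". */
    static int example_bmap(const char *path, size_t blocksize, uint64_t *idx)
    {
        (void)path;
        (void)blocksize;
        /* identity mapping: leave *idx unchanged */
        return 0;
    }

    static const struct fuse_operations example_ops = {
        .bmap = example_bmap,
    };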
5559b8f4 MS |
1598 | static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin) |
1599 | { | |
1600 | loff_t retval; | |
1601 | struct inode *inode = file->f_path.dentry->d_inode; | |
1602 | ||
c07c3d19 MS |
1603 | /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */ |
1604 | if (origin == SEEK_CUR || origin == SEEK_SET) | |
1605 | return generic_file_llseek(file, offset, origin); | |
06222e49 | 1606 | |
c07c3d19 MS |
1607 | mutex_lock(&inode->i_mutex); |
1608 | retval = fuse_update_attributes(inode, NULL, file, NULL); | |
1609 | if (!retval) | |
1610 | retval = generic_file_llseek(file, offset, origin); | |
5559b8f4 | 1611 | mutex_unlock(&inode->i_mutex); |
c07c3d19 | 1612 | |
5559b8f4 MS |
1613 | return retval; |
1614 | } | |
1615 | ||
59efec7b TH |
1616 | static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, |
1617 | unsigned int nr_segs, size_t bytes, bool to_user) | |
1618 | { | |
1619 | struct iov_iter ii; | |
1620 | int page_idx = 0; | |
1621 | ||
1622 | if (!bytes) | |
1623 | return 0; | |
1624 | ||
1625 | iov_iter_init(&ii, iov, nr_segs, bytes, 0); | |
1626 | ||
1627 | while (iov_iter_count(&ii)) { | |
1628 | struct page *page = pages[page_idx++]; | |
1629 | size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); | |
4aa0edd2 | 1630 | void *kaddr; |
59efec7b | 1631 | |
4aa0edd2 | 1632 | kaddr = kmap(page); |
59efec7b TH |
1633 | |
1634 | while (todo) { | |
1635 | char __user *uaddr = ii.iov->iov_base + ii.iov_offset; | |
1636 | size_t iov_len = ii.iov->iov_len - ii.iov_offset; | |
1637 | size_t copy = min(todo, iov_len); | |
1638 | size_t left; | |
1639 | ||
1640 | if (!to_user) | |
1641 | left = copy_from_user(kaddr, uaddr, copy); | |
1642 | else | |
1643 | left = copy_to_user(uaddr, kaddr, copy); | |
1644 | ||
1645 | if (unlikely(left)) { | |
1646 | kunmap(page); /* don't leak the mapping on the fault path */ | |
1647 | return -EFAULT; | |
1648 | } | |
1647 | ||
1648 | iov_iter_advance(&ii, copy); | |
1649 | todo -= copy; | |
1650 | kaddr += copy; | |
1651 | } | |
1652 | ||
0bd87182 | 1653 | kunmap(page); |
59efec7b TH |
1654 | } |
1655 | ||
1656 | return 0; | |
1657 | } | |
1658 | ||
d9d318d3 MS |
1659 | /* |
1660 | * CUSE servers compiled on 32bit broke on 64bit kernels because the | |
1661 | * ABI was defined to be 'struct iovec' which is different on 32bit | |
1662 | * and 64bit. Fortunately we can determine which structure the server | |
1663 | * used from the size of the reply. | |
1664 | */ | |
1baa26b2 MS |
1665 | static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, |
1666 | size_t transferred, unsigned count, | |
1667 | bool is_compat) | |
d9d318d3 MS |
1668 | { |
1669 | #ifdef CONFIG_COMPAT | |
1670 | if (count * sizeof(struct compat_iovec) == transferred) { | |
1671 | struct compat_iovec *ciov = src; | |
1672 | unsigned i; | |
1673 | ||
1674 | /* | |
1675 | * With this interface a 32bit server cannot support | |
1676 | * non-compat (i.e. ones coming from 64bit apps) ioctl | |
1677 | * requests | |
1678 | */ | |
1679 | if (!is_compat) | |
1680 | return -EINVAL; | |
1681 | ||
1682 | for (i = 0; i < count; i++) { | |
1683 | dst[i].iov_base = compat_ptr(ciov[i].iov_base); | |
1684 | dst[i].iov_len = ciov[i].iov_len; | |
1685 | } | |
1686 | return 0; | |
1687 | } | |
1688 | #endif | |
1689 | ||
1690 | if (count * sizeof(struct iovec) != transferred) | |
1691 | return -EIO; | |
1692 | ||
1693 | memcpy(dst, src, transferred); | |
1694 | return 0; | |
1695 | } | |
1696 | ||
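A quick worked example of the size discrimination above: for count == 2, a 64-bit server replies with 2 * 16 == 32 bytes while a 32-bit server replies with 2 * 8 == 16, so transferred alone identifies the ABI. The struct below only mirrors the kernel's compat_iovec layout for illustration; it is not a real UAPI type.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/uio.h>

    /* stand-in for the kernel's compat_iovec: two 32-bit fields */
    struct compat_iovec_demo {
        uint32_t iov_base;
        uint32_t iov_len;
    };

    int main(void)
    {
        unsigned count = 2;

        /* on an LP64 build this prints 32 and 16 */
        printf("64-bit server: %zu bytes\n", count * sizeof(struct iovec));
        printf("32-bit server: %zu bytes\n",
               count * sizeof(struct compat_iovec_demo));
        return 0;
    }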
7572777e MS |
1697 | /* Make sure iov_length() won't overflow */ |
1698 | static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) | |
1699 | { | |
1700 | size_t n; | |
1701 | u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; | |
1702 | ||
1703 | for (n = 0; n < count; n++) { | |
1704 | if (iov->iov_len > (size_t) max) | |
1705 | return -ENOMEM; | |
1706 | max -= iov->iov_len; | |
1707 | } | |
1708 | return 0; | |
1709 | } | |
1710 | ||
1baa26b2 MS |
1711 | static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, |
1712 | void *src, size_t transferred, unsigned count, | |
1713 | bool is_compat) | |
1714 | { | |
1715 | unsigned i; | |
1716 | struct fuse_ioctl_iovec *fiov = src; | |
1717 | ||
1718 | if (fc->minor < 16) { | |
1719 | return fuse_copy_ioctl_iovec_old(dst, src, transferred, | |
1720 | count, is_compat); | |
1721 | } | |
1722 | ||
1723 | if (count * sizeof(struct fuse_ioctl_iovec) != transferred) | |
1724 | return -EIO; | |
1725 | ||
1726 | for (i = 0; i < count; i++) { | |
1727 | /* Did the server supply an inappropriate value? */ | |
1728 | if (fiov[i].base != (unsigned long) fiov[i].base || | |
1729 | fiov[i].len != (unsigned long) fiov[i].len) | |
1730 | return -EIO; | |
1731 | ||
1732 | dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; | |
1733 | dst[i].iov_len = (size_t) fiov[i].len; | |
1734 | ||
1735 | #ifdef CONFIG_COMPAT | |
1736 | if (is_compat && | |
1737 | (ptr_to_compat(dst[i].iov_base) != fiov[i].base || | |
1738 | (compat_size_t) dst[i].iov_len != fiov[i].len)) | |
1739 | return -EIO; | |
1740 | #endif | |
1741 | } | |
1742 | ||
1743 | return 0; | |
1744 | } | |
1745 | ||
1746 | ||
59efec7b TH |
1747 | /* |
1748 | * For ioctls, there is no generic way to determine how much memory | |
1749 | * needs to be read and/or written. Furthermore, ioctls are allowed | |
1750 | * to dereference the passed pointer, so the parameter requires deep | |
1751 | * copying but FUSE has no idea whatsoever about what to copy in or | |
1752 | * out. | |
1753 | * | |
1754 | * This is solved by allowing FUSE server to retry ioctl with | |
1755 | * necessary in/out iovecs. Let's assume the ioctl implementation | |
1756 | * needs to read in the following structure. | |
1757 | * | |
1758 | * struct a { | |
1759 | * char *buf; | |
1760 | * size_t buflen; | |
1761 | * } | |
1762 | * | |
1763 |  * On the first callout to the FUSE server, inarg->in_size and | |
1764 |  * inarg->out_size will be zero; then, the server completes the ioctl | |
1765 | * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and | |
1766 | * the actual iov array to | |
1767 | * | |
1768 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } | |
1769 | * | |
1770 | * which tells FUSE to copy in the requested area and retry the ioctl. | |
1771 | * On the second round, the server has access to the structure and | |
1772 |  * from that it can tell what to look for next, so on the second | |
1773 |  * invocation it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to | |
1774 | * | |
1775 | * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, | |
1776 | * { .iov_base = a.buf, .iov_len = a.buflen } } | |
1777 | * | |
1778 | * FUSE will copy both struct a and the pointed buffer from the | |
1779 | * process doing the ioctl and retry ioctl with both struct a and the | |
1780 | * buffer. | |
1781 | * | |
1782 | * This time, FUSE server has everything it needs and completes ioctl | |
1783 | * without FUSE_IOCTL_RETRY which finishes the ioctl call. | |
1784 | * | |
1785 | * Copying data out works the same way. | |
1786 | * | |
1787 | * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel | |
1788 | * automatically initializes in and out iovs by decoding @cmd with | |
1789 | * _IOC_* macros and the server is not allowed to request RETRY. This | |
1790 | * limits ioctl data transfers to well-formed ioctls and is the forced | |
1791 | * behavior for all FUSE servers. | |
1792 | */ | |
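Seen from the server, the retry dance is usually driven through the libfuse low-level API. The sketch below assumes libfuse 2.9's low-level .ioctl signature, reuses struct a from the comment above, and uses in_bufsz to tell the three callouts apart; remember that RETRY is only honored for unrestricted ioctls (e.g. CUSE), per the note above.

    #define FUSE_USE_VERSION 29
    #include <fuse_lowlevel.h>
    #include <sys/uio.h>

    struct a {
        char *buf;
        size_t buflen;
    };

    static void example_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd,
                              void *arg, struct fuse_file_info *fi,
                              unsigned flags, const void *in_buf,
                              size_t in_bufsz, size_t out_bufsz)
    {
        (void)ino; (void)cmd; (void)fi; (void)flags; (void)out_bufsz;

        if (in_bufsz == 0) {
            /* first callout: ask the kernel to copy in struct a */
            struct iovec iov = { .iov_base = arg, .iov_len = sizeof(struct a) };
            fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
        } else if (in_bufsz == sizeof(struct a)) {
            /* second callout: struct a is now visible, ask for a.buf too */
            const struct a *a = in_buf;
            struct iovec iov[2] = {
                { .iov_base = arg,    .iov_len = sizeof(struct a) },
                { .iov_base = a->buf, .iov_len = a->buflen },
            };
            fuse_reply_ioctl_retry(req, iov, 2, NULL, 0);
        } else {
            /* third callout: everything was copied in; finish the ioctl */
            fuse_reply_ioctl(req, 0, NULL, 0);
        }
    }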
08cbf542 TH |
1793 | long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, |
1794 | unsigned int flags) | |
59efec7b | 1795 | { |
59efec7b | 1796 | struct fuse_file *ff = file->private_data; |
d36f2487 | 1797 | struct fuse_conn *fc = ff->fc; |
59efec7b TH |
1798 | struct fuse_ioctl_in inarg = { |
1799 | .fh = ff->fh, | |
1800 | .cmd = cmd, | |
1801 | .arg = arg, | |
1802 | .flags = flags | |
1803 | }; | |
1804 | struct fuse_ioctl_out outarg; | |
1805 | struct fuse_req *req = NULL; | |
1806 | struct page **pages = NULL; | |
8ac83505 | 1807 | struct iovec *iov_page = NULL; |
59efec7b TH |
1808 | struct iovec *in_iov = NULL, *out_iov = NULL; |
1809 | unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; | |
1810 | size_t in_size, out_size, transferred; | |
1811 | int err; | |
1812 | ||
1baa26b2 MS |
1813 | #if BITS_PER_LONG == 32 |
1814 | inarg.flags |= FUSE_IOCTL_32BIT; | |
1815 | #else | |
1816 | if (flags & FUSE_IOCTL_COMPAT) | |
1817 | inarg.flags |= FUSE_IOCTL_32BIT; | |
1818 | #endif | |
1819 | ||
59efec7b | 1820 | /* assume all the iovs returned by the client always fit in a page */ |
1baa26b2 | 1821 | BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); |
59efec7b | 1822 | |
59efec7b | 1823 | err = -ENOMEM; |
c411cc88 | 1824 | pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); |
8ac83505 | 1825 | iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); |
59efec7b TH |
1826 | if (!pages || !iov_page) |
1827 | goto out; | |
1828 | ||
1829 | /* | |
1830 | * If restricted, initialize IO parameters as encoded in @cmd. | |
1831 | * RETRY from server is not allowed. | |
1832 | */ | |
1833 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { | |
8ac83505 | 1834 | struct iovec *iov = iov_page; |
59efec7b | 1835 | |
c9f0523d | 1836 | iov->iov_base = (void __user *)arg; |
59efec7b TH |
1837 | iov->iov_len = _IOC_SIZE(cmd); |
1838 | ||
1839 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | |
1840 | in_iov = iov; | |
1841 | in_iovs = 1; | |
1842 | } | |
1843 | ||
1844 | if (_IOC_DIR(cmd) & _IOC_READ) { | |
1845 | out_iov = iov; | |
1846 | out_iovs = 1; | |
1847 | } | |
1848 | } | |
1849 | ||
1850 | retry: | |
1851 | inarg.in_size = in_size = iov_length(in_iov, in_iovs); | |
1852 | inarg.out_size = out_size = iov_length(out_iov, out_iovs); | |
1853 | ||
1854 | /* | |
1855 | * Out data can be used either for actual out data or iovs, | |
1856 | * so make sure there is always at least one page. | |
1857 | */ | |
1858 | out_size = max_t(size_t, out_size, PAGE_SIZE); | |
1859 | max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); | |
1860 | ||
1861 | /* make sure there are enough buffer pages and init request with them */ | |
1862 | err = -ENOMEM; | |
1863 | if (max_pages > FUSE_MAX_PAGES_PER_REQ) | |
1864 | goto out; | |
1865 | while (num_pages < max_pages) { | |
1866 | pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | |
1867 | if (!pages[num_pages]) | |
1868 | goto out; | |
1869 | num_pages++; | |
1870 | } | |
1871 | ||
1872 | req = fuse_get_req(fc); | |
1873 | if (IS_ERR(req)) { | |
1874 | err = PTR_ERR(req); | |
1875 | req = NULL; | |
1876 | goto out; | |
1877 | } | |
1878 | memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); | |
1879 | req->num_pages = num_pages; | |
1880 | ||
1881 | /* okay, let's send it to the client */ | |
1882 | req->in.h.opcode = FUSE_IOCTL; | |
d36f2487 | 1883 | req->in.h.nodeid = ff->nodeid; |
59efec7b TH |
1884 | req->in.numargs = 1; |
1885 | req->in.args[0].size = sizeof(inarg); | |
1886 | req->in.args[0].value = &inarg; | |
1887 | if (in_size) { | |
1888 | req->in.numargs++; | |
1889 | req->in.args[1].size = in_size; | |
1890 | req->in.argpages = 1; | |
1891 | ||
1892 | err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, | |
1893 | false); | |
1894 | if (err) | |
1895 | goto out; | |
1896 | } | |
1897 | ||
1898 | req->out.numargs = 2; | |
1899 | req->out.args[0].size = sizeof(outarg); | |
1900 | req->out.args[0].value = &outarg; | |
1901 | req->out.args[1].size = out_size; | |
1902 | req->out.argpages = 1; | |
1903 | req->out.argvar = 1; | |
1904 | ||
b93f858a | 1905 | fuse_request_send(fc, req); |
59efec7b TH |
1906 | err = req->out.h.error; |
1907 | transferred = req->out.args[1].size; | |
1908 | fuse_put_request(fc, req); | |
1909 | req = NULL; | |
1910 | if (err) | |
1911 | goto out; | |
1912 | ||
1913 | /* did it ask for retry? */ | |
1914 | if (outarg.flags & FUSE_IOCTL_RETRY) { | |
8ac83505 | 1915 | void *vaddr; |
59efec7b TH |
1916 | |
1917 | /* no retry if in restricted mode */ | |
1918 | err = -EIO; | |
1919 | if (!(flags & FUSE_IOCTL_UNRESTRICTED)) | |
1920 | goto out; | |
1921 | ||
1922 | in_iovs = outarg.in_iovs; | |
1923 | out_iovs = outarg.out_iovs; | |
1924 | ||
1925 | /* | |
1926 | * Make sure the iovec counts are within bounds; the separate | |
1927 | * checks protect against overflow. | |
1928 | */ | |
1929 | err = -ENOMEM; | |
1930 | if (in_iovs > FUSE_IOCTL_MAX_IOV || | |
1931 | out_iovs > FUSE_IOCTL_MAX_IOV || | |
1932 | in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) | |
1933 | goto out; | |
1934 | ||
2408f6ef | 1935 | vaddr = kmap_atomic(pages[0]); |
1baa26b2 | 1936 | err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, |
d9d318d3 MS |
1937 | transferred, in_iovs + out_iovs, |
1938 | (flags & FUSE_IOCTL_COMPAT) != 0); | |
2408f6ef | 1939 | kunmap_atomic(vaddr); |
d9d318d3 MS |
1940 | if (err) |
1941 | goto out; | |
59efec7b | 1942 | |
8ac83505 | 1943 | in_iov = iov_page; |
59efec7b TH |
1944 | out_iov = in_iov + in_iovs; |
1945 | ||
7572777e MS |
1946 | err = fuse_verify_ioctl_iov(in_iov, in_iovs); |
1947 | if (err) | |
1948 | goto out; | |
1949 | ||
1950 | err = fuse_verify_ioctl_iov(out_iov, out_iovs); | |
1951 | if (err) | |
1952 | goto out; | |
1953 | ||
59efec7b TH |
1954 | goto retry; |
1955 | } | |
1956 | ||
1957 | err = -EIO; | |
1958 | if (transferred > inarg.out_size) | |
1959 | goto out; | |
1960 | ||
1961 | err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); | |
1962 | out: | |
1963 | if (req) | |
1964 | fuse_put_request(fc, req); | |
8ac83505 | 1965 | free_page((unsigned long) iov_page); |
59efec7b TH |
1966 | while (num_pages) |
1967 | __free_page(pages[--num_pages]); | |
1968 | kfree(pages); | |
1969 | ||
1970 | return err ? err : outarg.result; | |
1971 | } | |
08cbf542 | 1972 | EXPORT_SYMBOL_GPL(fuse_do_ioctl); |
59efec7b | 1973 | |
b18da0c5 MS |
1974 | long fuse_ioctl_common(struct file *file, unsigned int cmd, |
1975 | unsigned long arg, unsigned int flags) | |
d36f2487 MS |
1976 | { |
1977 | struct inode *inode = file->f_path.dentry->d_inode; | |
1978 | struct fuse_conn *fc = get_fuse_conn(inode); | |
1979 | ||
1980 | if (!fuse_allow_task(fc, current)) | |
1981 | return -EACCES; | |
1982 | ||
1983 | if (is_bad_inode(inode)) | |
1984 | return -EIO; | |
1985 | ||
1986 | return fuse_do_ioctl(file, cmd, arg, flags); | |
1987 | } | |
1988 | ||
59efec7b TH |
1989 | static long fuse_file_ioctl(struct file *file, unsigned int cmd, |
1990 | unsigned long arg) | |
1991 | { | |
b18da0c5 | 1992 | return fuse_ioctl_common(file, cmd, arg, 0); |
59efec7b TH |
1993 | } |
1994 | ||
1995 | static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, | |
1996 | unsigned long arg) | |
1997 | { | |
b18da0c5 | 1998 | return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); |
59efec7b TH |
1999 | } |
2000 | ||
95668a69 TH |
2001 | /* |
2002 | * All files which have been polled are linked into the RB tree | |
2003 | * fuse_conn->polled_files, which is indexed by kh. Walk the tree and | |
2004 | * find the matching one. | |
2005 | */ | |
2006 | static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, | |
2007 | struct rb_node **parent_out) | |
2008 | { | |
2009 | struct rb_node **link = &fc->polled_files.rb_node; | |
2010 | struct rb_node *last = NULL; | |
2011 | ||
2012 | while (*link) { | |
2013 | struct fuse_file *ff; | |
2014 | ||
2015 | last = *link; | |
2016 | ff = rb_entry(last, struct fuse_file, polled_node); | |
2017 | ||
2018 | if (kh < ff->kh) | |
2019 | link = &last->rb_left; | |
2020 | else if (kh > ff->kh) | |
2021 | link = &last->rb_right; | |
2022 | else | |
2023 | return link; | |
2024 | } | |
2025 | ||
2026 | if (parent_out) | |
2027 | *parent_out = last; | |
2028 | return link; | |
2029 | } | |
2030 | ||
2031 | /* | |
2032 | * The file is about to be polled. Make sure it's on the polled_files | |
2033 | * RB tree. Note that files once added to the polled_files tree are | |
2034 | * not removed before the file is released. This is because a file | |
2035 | * polled once is likely to be polled again. | |
2036 | */ | |
2037 | static void fuse_register_polled_file(struct fuse_conn *fc, | |
2038 | struct fuse_file *ff) | |
2039 | { | |
2040 | spin_lock(&fc->lock); | |
2041 | if (RB_EMPTY_NODE(&ff->polled_node)) { | |
2042 | struct rb_node **link, *parent; | |
2043 | ||
2044 | link = fuse_find_polled_node(fc, ff->kh, &parent); | |
2045 | BUG_ON(*link); | |
2046 | rb_link_node(&ff->polled_node, parent, link); | |
2047 | rb_insert_color(&ff->polled_node, &fc->polled_files); | |
2048 | } | |
2049 | spin_unlock(&fc->lock); | |
2050 | } | |
2051 | ||
08cbf542 | 2052 | unsigned fuse_file_poll(struct file *file, poll_table *wait) |
95668a69 | 2053 | { |
95668a69 | 2054 | struct fuse_file *ff = file->private_data; |
797759aa | 2055 | struct fuse_conn *fc = ff->fc; |
95668a69 TH |
2056 | struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; |
2057 | struct fuse_poll_out outarg; | |
2058 | struct fuse_req *req; | |
2059 | int err; | |
2060 | ||
2061 | if (fc->no_poll) | |
2062 | return DEFAULT_POLLMASK; | |
2063 | ||
2064 | poll_wait(file, &ff->poll_wait, wait); | |
2065 | ||
2066 | /* | |
2067 | * Ask for notification iff there's someone waiting for it. | |
2068 | * The client may ignore the flag and always notify. | |
2069 | */ | |
2070 | if (waitqueue_active(&ff->poll_wait)) { | |
2071 | inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; | |
2072 | fuse_register_polled_file(fc, ff); | |
2073 | } | |
2074 | ||
2075 | req = fuse_get_req(fc); | |
2076 | if (IS_ERR(req)) | |
201fa69a | 2077 | return POLLERR; |
95668a69 TH |
2078 | |
2079 | req->in.h.opcode = FUSE_POLL; | |
797759aa | 2080 | req->in.h.nodeid = ff->nodeid; |
95668a69 TH |
2081 | req->in.numargs = 1; |
2082 | req->in.args[0].size = sizeof(inarg); | |
2083 | req->in.args[0].value = &inarg; | |
2084 | req->out.numargs = 1; | |
2085 | req->out.args[0].size = sizeof(outarg); | |
2086 | req->out.args[0].value = &outarg; | |
b93f858a | 2087 | fuse_request_send(fc, req); |
95668a69 TH |
2088 | err = req->out.h.error; |
2089 | fuse_put_request(fc, req); | |
2090 | ||
2091 | if (!err) | |
2092 | return outarg.revents; | |
2093 | if (err == -ENOSYS) { | |
2094 | fc->no_poll = 1; | |
2095 | return DEFAULT_POLLMASK; | |
2096 | } | |
2097 | return POLLERR; | |
2098 | } | |
08cbf542 | 2099 | EXPORT_SYMBOL_GPL(fuse_file_poll); |
95668a69 TH |
2100 | |
2101 | /* | |
2102 | * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and | |
2103 | * wakes up the poll waiters. | |
2104 | */ | |
2105 | int fuse_notify_poll_wakeup(struct fuse_conn *fc, | |
2106 | struct fuse_notify_poll_wakeup_out *outarg) | |
2107 | { | |
2108 | u64 kh = outarg->kh; | |
2109 | struct rb_node **link; | |
2110 | ||
2111 | spin_lock(&fc->lock); | |
2112 | ||
2113 | link = fuse_find_polled_node(fc, kh, NULL); | |
2114 | if (*link) { | |
2115 | struct fuse_file *ff; | |
2116 | ||
2117 | ff = rb_entry(*link, struct fuse_file, polled_node); | |
2118 | wake_up_interruptible_sync(&ff->poll_wait); | |
2119 | } | |
2120 | ||
2121 | spin_unlock(&fc->lock); | |
2122 | return 0; | |
2123 | } | |
2124 | ||
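The server-side counterpart of this wakeup path, as a sketch against libfuse 2.9's low-level API (stored_ph and ready_events are illustrative state, not libfuse names): when the kernel passes a poll handle (it does so after setting FUSE_POLL_SCHEDULE_NOTIFY), the server stashes it and later calls fuse_lowlevel_notify_poll(), which arrives here as FUSE_NOTIFY_POLL.

    #define FUSE_USE_VERSION 29
    #include <fuse_lowlevel.h>
    #include <poll.h>

    static struct fuse_pollhandle *stored_ph;   /* illustrative state */
    static unsigned ready_events = POLLIN;

    static void example_poll(fuse_req_t req, fuse_ino_t ino,
                             struct fuse_file_info *fi,
                             struct fuse_pollhandle *ph)
    {
        (void)ino; (void)fi;

        if (ph) {
            /* keep the newest handle; destroy any older one */
            if (stored_ph)
                fuse_pollhandle_destroy(stored_ph);
            stored_ph = ph;
        }
        fuse_reply_poll(req, ready_events);
    }

    /* call when the file becomes ready: sends FUSE_NOTIFY_POLL, which
     * the kernel routes to fuse_notify_poll_wakeup() above */
    static void example_mark_ready(void)
    {
        if (stored_ph) {
            fuse_lowlevel_notify_poll(stored_ph);
            fuse_pollhandle_destroy(stored_ph);
            stored_ph = NULL;
        }
    }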
4273b793 AA |
2125 | static ssize_t fuse_loop_dio(struct file *filp, const struct iovec *iov, |
2126 | unsigned long nr_segs, loff_t *ppos, int rw) | |
2127 | { | |
2128 | const struct iovec *vector = iov; | |
2129 | ssize_t ret = 0; | |
2130 | ||
2131 | while (nr_segs > 0) { | |
2132 | void __user *base; | |
2133 | size_t len; | |
2134 | ssize_t nr; | |
2135 | ||
2136 | base = vector->iov_base; | |
2137 | len = vector->iov_len; | |
2138 | vector++; | |
2139 | nr_segs--; | |
2140 | ||
2141 | if (rw == WRITE) | |
2142 | nr = __fuse_direct_write(filp, base, len, ppos); | |
2143 | else | |
2144 | nr = fuse_direct_read(filp, base, len, ppos); | |
2145 | ||
2146 | if (nr < 0) { | |
2147 | if (!ret) | |
2148 | ret = nr; | |
2149 | break; | |
2150 | } | |
2151 | ret += nr; | |
2152 | if (nr != len) | |
2153 | break; | |
2154 | } | |
2155 | ||
2156 | return ret; | |
2157 | } | |
2158 | ||
2159 | ||
2160 | static ssize_t | |
2161 | fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |
2162 | loff_t offset, unsigned long nr_segs) | |
2163 | { | |
2164 | ssize_t ret = 0; | |
2165 | struct file *file = NULL; | |
2166 | loff_t pos = 0; | |
2167 | ||
2168 | file = iocb->ki_filp; | |
2169 | pos = offset; | |
2170 | ||
2171 | ret = fuse_loop_dio(file, iov, nr_segs, &pos, rw); | |
2172 | ||
2173 | return ret; | |
2174 | } | |
2175 | ||
05ba1f08 AP |
2176 | long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
2177 | loff_t length) | |
2178 | { | |
2179 | struct fuse_file *ff = file->private_data; | |
2180 | struct fuse_conn *fc = ff->fc; | |
2181 | struct fuse_req *req; | |
2182 | struct fuse_fallocate_in inarg = { | |
2183 | .fh = ff->fh, | |
2184 | .offset = offset, | |
2185 | .length = length, | |
2186 | .mode = mode | |
2187 | }; | |
2188 | int err; | |
2189 | ||
519c6040 MS |
2190 | if (fc->no_fallocate) |
2191 | return -EOPNOTSUPP; | |
2192 | ||
05ba1f08 AP |
2193 | req = fuse_get_req(fc); |
2194 | if (IS_ERR(req)) | |
2195 | return PTR_ERR(req); | |
2196 | ||
2197 | req->in.h.opcode = FUSE_FALLOCATE; | |
2198 | req->in.h.nodeid = ff->nodeid; | |
2199 | req->in.numargs = 1; | |
2200 | req->in.args[0].size = sizeof(inarg); | |
2201 | req->in.args[0].value = &inarg; | |
2202 | fuse_request_send(fc, req); | |
2203 | err = req->out.h.error; | |
519c6040 MS |
2204 | if (err == -ENOSYS) { |
2205 | fc->no_fallocate = 1; | |
2206 | err = -EOPNOTSUPP; | |
2207 | } | |
05ba1f08 AP |
2208 | fuse_put_request(fc, req); |
2209 | ||
2210 | return err; | |
2211 | } | |
2212 | EXPORT_SYMBOL_GPL(fuse_file_fallocate); | |
2213 | ||
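On the server side FUSE_FALLOCATE maps to the .fallocate operation added in libfuse 2.9.1; a server without it replies -ENOSYS, which is exactly what sets no_fallocate above. A minimal high-level sketch, assuming each open file keeps a backing fd in fi->fh (an assumption of this example, not a libfuse requirement):

    #define FUSE_USE_VERSION 29
    #include <fuse.h>
    #include <fcntl.h>
    #include <errno.h>

    static int example_fallocate(const char *path, int mode, off_t offset,
                                 off_t length, struct fuse_file_info *fi)
    {
        (void)path;

        /* plain preallocation only; hole punching etc. would need
         * fallocate(2) mode flags on the backing file */
        if (mode)
            return -EOPNOTSUPP;

        /* posix_fallocate() returns a positive errno value on failure */
        return -posix_fallocate((int)fi->fh, offset, length);
    }

    static const struct fuse_operations example_ops = {
        .fallocate = example_fallocate,
    };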
4b6f5d20 | 2214 | static const struct file_operations fuse_file_operations = { |
5559b8f4 | 2215 | .llseek = fuse_file_llseek, |
543ade1f | 2216 | .read = do_sync_read, |
bcb4be80 | 2217 | .aio_read = fuse_file_aio_read, |
543ade1f | 2218 | .write = do_sync_write, |
ea9b9907 | 2219 | .aio_write = fuse_file_aio_write, |
b6aeaded MS |
2220 | .mmap = fuse_file_mmap, |
2221 | .open = fuse_open, | |
2222 | .flush = fuse_flush, | |
2223 | .release = fuse_release, | |
2224 | .fsync = fuse_fsync, | |
71421259 | 2225 | .lock = fuse_file_lock, |
a9ff4f87 | 2226 | .flock = fuse_file_flock, |
5ffc4ef4 | 2227 | .splice_read = generic_file_splice_read, |
59efec7b TH |
2228 | .unlocked_ioctl = fuse_file_ioctl, |
2229 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2230 | .poll = fuse_file_poll, |
05ba1f08 | 2231 | .fallocate = fuse_file_fallocate, |
b6aeaded MS |
2232 | }; |
2233 | ||
4b6f5d20 | 2234 | static const struct file_operations fuse_direct_io_file_operations = { |
5559b8f4 | 2235 | .llseek = fuse_file_llseek, |
413ef8cb MS |
2236 | .read = fuse_direct_read, |
2237 | .write = fuse_direct_write, | |
fc280c96 | 2238 | .mmap = fuse_direct_mmap, |
413ef8cb MS |
2239 | .open = fuse_open, |
2240 | .flush = fuse_flush, | |
2241 | .release = fuse_release, | |
2242 | .fsync = fuse_fsync, | |
71421259 | 2243 | .lock = fuse_file_lock, |
a9ff4f87 | 2244 | .flock = fuse_file_flock, |
59efec7b TH |
2245 | .unlocked_ioctl = fuse_file_ioctl, |
2246 | .compat_ioctl = fuse_file_compat_ioctl, | |
95668a69 | 2247 | .poll = fuse_file_poll, |
05ba1f08 | 2248 | .fallocate = fuse_file_fallocate, |
fc280c96 | 2249 | /* no splice_read */ |
413ef8cb MS |
2250 | }; |
2251 | ||
f5e54d6e | 2252 | static const struct address_space_operations fuse_file_aops = { |
b6aeaded | 2253 | .readpage = fuse_readpage, |
3be5a52b MS |
2254 | .writepage = fuse_writepage, |
2255 | .launder_page = fuse_launder_page, | |
db50b96c | 2256 | .readpages = fuse_readpages, |
3be5a52b | 2257 | .set_page_dirty = __set_page_dirty_nobuffers, |
b2d2272f | 2258 | .bmap = fuse_bmap, |
4273b793 | 2259 | .direct_IO = fuse_direct_IO, |
b6aeaded MS |
2260 | }; |
2261 | ||
2262 | void fuse_init_file_inode(struct inode *inode) | |
2263 | { | |
45323fb7 MS |
2264 | inode->i_fop = &fuse_file_operations; |
2265 | inode->i_data.a_ops = &fuse_file_aops; | |
b6aeaded | 2266 | } |