/*
  FUSE: Filesystem in Userspace

  This program can be distributed under the terms of the GNU GPL.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>

static const struct file_operations fuse_direct_io_file_operations;

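/*
 * Send a FUSE_OPEN or FUSE_OPENDIR request and wait for the reply.
 * The open flags are forwarded with the creation-related bits masked
 * off; the reply (struct fuse_open_out) carries the file handle and
 * the per-open flags chosen by the userspace filesystem.
 */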
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
                          struct fuse_open_out *outargp)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_request(fc);
        if (!req)
                return -EINTR;

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
        req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
        req->in.h.nodeid = get_node_id(inode);
        req->inode = inode;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

struct fuse_file *fuse_file_alloc(void)
{
        struct fuse_file *ff;

        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (ff) {
                ff->release_req = fuse_request_alloc();
                if (!ff->release_req) {
                        kfree(ff);
                        ff = NULL;
                }
        }
        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->release_req);
        kfree(ff);
}

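/*
 * Apply the per-open flags returned by userspace.  FOPEN_DIRECT_IO
 * switches the file to the direct I/O file operations, bypassing the
 * page cache; unless FOPEN_KEEP_CACHE is set, previously cached pages
 * of the inode are invalidated.
 */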
void fuse_finish_open(struct inode *inode, struct file *file,
                      struct fuse_file *ff, struct fuse_open_out *outarg)
{
        if (outarg->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages(inode->i_mapping);
        ff->fh = outarg->fh;
        file->private_data = ff;
}

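/*
 * Common open code for files and directories.  O_DIRECT is rejected
 * up front, because the VFS only checks it after ->open() has
 * succeeded.  The root inode is never looked up, so its attributes
 * are refreshed here before the open request is sent.
 */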
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;

        /* VFS checks this, but only _after_ ->open() */
        if (file->f_flags & O_DIRECT)
                return -EINVAL;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        /* If opening the root node, no lookup has been performed on
           it, so the attributes must be refreshed */
        if (get_node_id(inode) == FUSE_ROOT_ID) {
                err = fuse_do_getattr(inode);
                if (err)
                        return err;
        }

        ff = fuse_file_alloc();
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(inode, file, isdir, &outarg);
        if (err)
                fuse_file_free(ff);
        else {
                if (isdir)
                        outarg.open_flags &= ~FOPEN_DIRECT_IO;
                fuse_finish_open(inode, file, ff, &outarg);
        }

        return err;
}

/* Special case for failed iget in CREATE */
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        /* If called from end_io_requests(), req has more than one
           reference and fuse_reset_request() cannot work */
        if (fc->connected) {
                u64 nodeid = req->in.h.nodeid;
                fuse_reset_request(req);
                fuse_send_forget(fc, req, nodeid, 1);
        } else
                fuse_put_request(fc, req);
}

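/*
 * Send a FUSE_RELEASE or FUSE_RELEASEDIR request in the background,
 * using the request preallocated in fuse_file_alloc(), presumably so
 * that release cannot fail under memory pressure.  If there is no
 * inode (failed iget in CREATE), fuse_release_end() turns the request
 * into a FORGET once the release has been answered.
 */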
void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
                       u64 nodeid, struct inode *inode, int flags, int isdir)
{
        struct fuse_req *req = ff->release_req;
        struct fuse_release_in *inarg = &req->misc.release_in;

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
        req->in.h.nodeid = nodeid;
        req->inode = inode;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
        request_send_background(fc, req);
        if (!inode)
                req->end = fuse_release_end;
        kfree(ff);
}

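/*
 * Common release code for files and directories, called on the last
 * close of the file.  The release request is only sent if ->open()
 * actually installed a fuse_file in file->private_data.
 */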
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_file *ff = file->private_data;
        if (ff) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                u64 nodeid = get_node_id(inode);
                fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir);
        }

        /* Return value is ignored by VFS */
        return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        return fuse_release_common(inode, file, 0);
}

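/*
 * FUSE_FLUSH is sent on every close() of a file descriptor.  A reply
 * of -ENOSYS is treated as success and disables further flush
 * requests for the lifetime of the connection (fc->no_flush).
 */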
static int fuse_flush(struct file *file)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_request(fc);
        if (!req)
                return -EINTR;

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->inode = inode;
        req->file = file;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

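/*
 * FUSE_FSYNC/FUSE_FSYNCDIR: only the datasync flag is forwarded to
 * userspace.  As with flush, an -ENOSYS reply disables further fsync
 * (or fsyncdir) requests on this connection.
 */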
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
                      int isdir)
{
        struct inode *inode = de->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        req = fuse_get_request(fc);
        if (!req)
                return -EINTR;

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->inode = inode;
        req->file = file;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
        return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
        return fuse_fsync_common(file, de, datasync, 0);
}

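/*
 * Fill in a read request.  The reply data is copied straight into the
 * pages attached to the request (out.argpages), and the reply size is
 * variable (out.argvar), so after the request completes
 * out.args[0].size holds the number of bytes userspace actually
 * returned; fuse_send_read() passes that back to its caller.
 */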
void fuse_read_fill(struct fuse_req *req, struct file *file,
                    struct inode *inode, loff_t pos, size_t count, int opcode)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_read_in *inarg = &req->misc.read_in;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->inode = inode;
        req->file = file;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argpages = 1;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
                             struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
        request_send(fc, req);
        return req->out.args[0].size;
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        err = -EINTR;
        req = fuse_get_request(fc);
        if (!req)
                goto out;

        req->out.page_zeroing = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                SetPageUptodate(page);
        fuse_invalidate_attr(inode); /* atime changed */
 out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;

        fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
        }
        fuse_put_request(fc, req);
}

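/*
 * Send a batch of contiguous pages as one FUSE_READ.  If the
 * connection supports asynchronous reads, the request is sent in the
 * background and finished from fuse_readpages_end(); otherwise it is
 * sent synchronously and the end callback is invoked directly.
 */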
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
                                struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;
        req->out.page_zeroing = 1;
        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
        if (fc->async_read) {
                req->end = fuse_readpages_end;
                request_send_background(fc, req);
        } else {
                request_send(fc, req);
                fuse_readpages_end(fc, req);
        }
}

struct fuse_readpages_data {
        struct fuse_req *req;
        struct file *file;
        struct inode *inode;
};

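/*
 * Add one page to the request being built.  The current request is
 * flushed and a new one started when it is full
 * (FUSE_MAX_PAGES_PER_REQ), when another page would exceed the
 * connection's max_read, or when the page is not contiguous with the
 * previous one.
 */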
static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_readpages_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_send_readpages(req, data->file, inode);
                data->req = req = fuse_get_request(fc);
                if (!req) {
                        unlock_page(page);
                        return -EINTR;
                }
        }
        req->pages[req->num_pages] = page;
        req->num_pages++;
        return 0;
}

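/*
 * ->readpages(): read_cache_pages() feeds each page to
 * fuse_readpages_fill(), which batches them into requests; any pages
 * left in the final request are sent once the walk has finished.
 */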
static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_readpages_data data;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        data.file = file;
        data.inode = inode;
        data.req = fuse_get_request(fc);
        if (!data.req)
                return -EINTR;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err)
                fuse_send_readpages(data.req, file, inode);
        return err;
}

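/*
 * Fill in and send a synchronous FUSE_WRITE.  The request carries two
 * input arguments: the fuse_write_in header and the data itself,
 * taken from the pages attached to the request (in.argpages).  The
 * reply reports how many bytes were actually written.
 */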
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                              struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_write_in inarg;
        struct fuse_write_out outarg;

        memset(&inarg, 0, sizeof(struct fuse_write_in));
        inarg.fh = ff->fh;
        inarg.offset = pos;
        inarg.size = count;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = get_node_id(inode);
        req->inode = inode;
        req->file = file;
        req->in.argpages = 1;
        req->in.numargs = 2;
        req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = &inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        return outarg.size;
}

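/*
 * ->prepare_write() is a no op: the written range goes straight to
 * userspace from ->commit_write(), so the page does not have to be
 * read in or brought uptodate beforehand.
 */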
static int fuse_prepare_write(struct file *file, struct page *page,
                              unsigned offset, unsigned to)
{
        /* No op */
        return 0;
}

static int fuse_commit_write(struct file *file, struct page *page,
                             unsigned offset, unsigned to)
{
        int err;
        size_t nres;
        unsigned count = to - offset;
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        loff_t pos = page_offset(page) + offset;
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_request(fc);
        if (!req)
                return -EINTR;

        req->num_pages = 1;
        req->pages[0] = page;
        req->page_offset = offset;
        nres = fuse_send_write(req, file, inode, pos, count);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err && nres != count)
                err = -EIO;
        if (!err) {
                pos += count;
                if (pos > i_size_read(inode))
                        i_size_write(inode, pos);

                if (offset == 0 && to == PAGE_CACHE_SIZE) {
                        clear_page_dirty(page);
                        SetPageUptodate(page);
                }
        }
        fuse_invalidate_attr(inode);
        return err;
}

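/*
 * Direct I/O helpers: the user buffer is pinned with get_user_pages()
 * and attached to the request, then released again when the request
 * has completed.  Pages that were stored into (the destination of a
 * read) are marked dirty on release.
 */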
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
                               unsigned nbytes, int write)
{
        unsigned long user_addr = (unsigned long) buf;
        unsigned offset = user_addr & ~PAGE_MASK;
        int npages;

        /* This doesn't work with nfsd */
        if (!current->mm)
                return -EPERM;

        nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, user_addr, npages, write,
                                0, req->pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (npages < 0)
                return npages;

        req->num_pages = npages;
        req->page_offset = offset;
        return 0;
}

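/*
 * Core of the direct I/O path: the user buffer is split into chunks
 * of at most max_read/max_write bytes, each chunk is sent as one
 * request, and the file position advances by the number of bytes the
 * filesystem reports.  A short read or write ends the loop.
 */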
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos, int write)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_request(fc);
        if (!req)
                return -EINTR;

        while (count) {
                size_t nres;
                size_t nbytes = min(count, nmax);
                int err = fuse_get_user_pages(req, buf, nbytes, !write);
                if (err) {
                        res = err;
                        break;
                }
                nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
                nbytes = min(count, nbytes);
                if (write)
                        nres = fuse_send_write(req, file, inode, pos, nbytes);
                else
                        nres = fuse_send_read(req, file, inode, pos, nbytes);
                fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                buf += nres;
                if (nres != nbytes)
                        break;
                if (count)
                        fuse_reset_request(req);
        }
        fuse_put_request(fc, req);
        if (res > 0) {
                if (write && pos > i_size_read(inode))
                        i_size_write(inode, pos);
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct inode *inode = file->f_dentry->d_inode;
        ssize_t res;

        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = fuse_direct_io(file, buf, count, ppos, 1);
        mutex_unlock(&inode->i_mutex);
        return res;
}

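/*
 * Shared writable mappings are rejected, presumably because they
 * cannot be supported without a ->writepage() (note that
 * ->set_page_dirty() below just complains).  For other shared
 * mappings VM_MAYWRITE is cleared, so mprotect() cannot add write
 * access later.
 */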
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED)) {
                if ((vma->vm_flags & VM_WRITE))
                        return -ENODEV;

                vma->vm_flags &= ~VM_MAYWRITE;
        }
        return generic_file_mmap(file, vma);
}

static int fuse_set_page_dirty(struct page *page)
{
        printk("fuse_set_page_dirty: should not happen\n");
        dump_stack();
        return 0;
}

static const struct file_operations fuse_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_file_read,
        .write          = generic_file_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .sendfile       = generic_file_sendfile,
};

static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        /* no mmap and sendfile */
};

static struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .prepare_write  = fuse_prepare_write,
        .commit_write   = fuse_commit_write,
        .readpages      = fuse_readpages,
        .set_page_dirty = fuse_set_page_dirty,
};

void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}