// SPDX-License-Identifier: GPL-2.0
/*
 * FUSE inode io modes.
 *
 * Copyright (c) 2024 CTERA Networks.
 */
#include "fuse_i.h"

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
16 * Return true if need to wait for new opens in caching mode.
18 static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
20 return READ_ONCE(fi->iocachectr) < 0 && !fuse_inode_backing(fi);
24 * Start cached io mode.
26 * Blocks new parallel dio writes and waits for the in-progress parallel dio
29 int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
31 struct fuse_inode *fi = get_fuse_inode(inode);
33 /* There are no io modes if server does not implement open */
39 * Setting the bit advises new direct-io writes to use an exclusive
40 * lock - without it the wait below might be forever.
42 while (fuse_is_io_cache_wait(fi)) {
43 set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
44 spin_unlock(&fi->lock);
45 wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
50 * Check if inode entered passthrough io mode while waiting for parallel
51 * dio write completion.
53 if (fuse_inode_backing(fi)) {
54 clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
55 spin_unlock(&fi->lock);
59 WARN_ON(ff->iomode == IOM_UNCACHED);
60 if (ff->iomode == IOM_NONE) {
61 ff->iomode = IOM_CACHED;
62 if (fi->iocachectr == 0)
63 set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
66 spin_unlock(&fi->lock);
70 static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
72 struct fuse_inode *fi = get_fuse_inode(inode);
75 WARN_ON(fi->iocachectr <= 0);
76 WARN_ON(ff->iomode != IOM_CACHED);
77 ff->iomode = IOM_NONE;
79 if (fi->iocachectr == 0)
80 clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
81 spin_unlock(&fi->lock);
84 /* Start strictly uncached io mode where cache access is not allowed */
85 int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb)
87 struct fuse_inode *fi = get_fuse_inode(inode);
88 struct fuse_backing *oldfb;
92 /* deny conflicting backing files on same fuse inode */
93 oldfb = fuse_inode_backing(fi);
94 if (oldfb && oldfb != fb) {
98 if (fi->iocachectr > 0) {
102 WARN_ON(ff->iomode != IOM_NONE);
104 ff->iomode = IOM_UNCACHED;
106 /* fuse inode holds a single refcount of backing file */
108 oldfb = fuse_inode_backing_set(fi, fb);
109 WARN_ON_ONCE(oldfb != NULL);
111 fuse_backing_put(fb);
114 spin_unlock(&fi->lock);
118 void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
120 struct fuse_inode *fi = get_fuse_inode(inode);
121 struct fuse_backing *oldfb = NULL;
123 spin_lock(&fi->lock);
124 WARN_ON(fi->iocachectr >= 0);
125 WARN_ON(ff->iomode != IOM_UNCACHED);
126 ff->iomode = IOM_NONE;
128 if (!fi->iocachectr) {
129 wake_up(&fi->direct_io_waitq);
130 oldfb = fuse_inode_backing_set(fi, NULL);
132 spin_unlock(&fi->lock);
134 fuse_backing_put(oldfb);
/*
 * Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
 * A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
 * operations go directly to the server, but mmap is done on the backing file.
 * FOPEN_PASSTHROUGH mode should not co-exist with any users of the fuse inode
 * page cache, so FOPEN_KEEP_CACHE is a strange and undesired combination.
 */
#define FOPEN_PASSTHROUGH_MASK \
	(FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES | \
	 FOPEN_NOFLUSH)
148 static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
150 struct fuse_file *ff = file->private_data;
151 struct fuse_conn *fc = get_fuse_conn(inode);
152 struct fuse_backing *fb;
155 /* Check allowed conditions for file open in passthrough mode */
156 if (!IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) || !fc->passthrough ||
157 (ff->open_flags & ~FOPEN_PASSTHROUGH_MASK))
160 fb = fuse_passthrough_open(file, inode,
161 ff->args->open_outarg.backing_id);
165 /* First passthrough file open denies caching inode io mode */
166 err = fuse_file_uncached_io_start(inode, ff, fb);
170 fuse_passthrough_release(ff, fb);
171 fuse_backing_put(fb);
176 /* Request access to submit new io to inode via open file */
177 int fuse_file_io_open(struct file *file, struct inode *inode)
179 struct fuse_file *ff = file->private_data;
180 struct fuse_inode *fi = get_fuse_inode(inode);
184 * io modes are not relevant with DAX and with server that does not
187 if (FUSE_IS_DAX(inode) || !ff->args)
191 * Server is expected to use FOPEN_PASSTHROUGH for all opens of an inode
192 * which is already open for passthrough.
195 if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH))
199 * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
201 if (!(ff->open_flags & FOPEN_DIRECT_IO))
202 ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;
205 * First passthrough file open denies caching inode io mode.
206 * First caching file open enters caching inode io mode.
208 * Note that if user opens a file open with O_DIRECT, but server did
209 * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
210 * so we put the inode in caching mode to prevent parallel dio.
212 if ((ff->open_flags & FOPEN_DIRECT_IO) &&
213 !(ff->open_flags & FOPEN_PASSTHROUGH))
216 if (ff->open_flags & FOPEN_PASSTHROUGH)
217 err = fuse_file_passthrough_open(inode, file);
219 err = fuse_file_cached_io_start(inode, ff);
226 pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
227 ff->open_flags, err);
229 * The file open mode determines the inode io mode.
230 * Using incorrect open mode is a server mistake, which results in
231 * user visible failure of open() with EIO error.
236 /* No more pending io and no new io possible to inode via open/mmapped file */
237 void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
240 * Last parallel dio close allows caching inode io mode.
241 * Last caching file close exits caching inode io mode.
243 switch (ff->iomode) {
248 fuse_file_uncached_io_end(inode, ff);
251 fuse_file_cached_io_end(inode, ff);