// SPDX-License-Identifier: GPL-2.0
/*
 * I/O and data path helper functionality.
 *
 * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
 */

#include <linux/kernel.h>
#include <linux/netfs.h>
#include "internal.h"

/*
 * netfs_inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
static int netfs_inode_dio_wait_interruptible(struct inode *inode)
{
	if (inode_dio_finished(inode))
		return 0;

	inode_dio_wait_interruptible(inode);
	return !inode_dio_finished(inode) ? -ERESTARTSYS : 0;
}

/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_o_direct(struct netfs_inode *ictx)
{
	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
		return 0;
	clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
	return netfs_inode_dio_wait_interruptible(&ictx->inode);
}

/**
 * netfs_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
 */
int netfs_start_io_read(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_read);

/**
 * netfs_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_read(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_read);

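/*
 * Example (illustrative sketch only, not part of this file's API): a network
 * filesystem's buffered ->read_iter() could bracket its page-cache path with
 * the read helpers so that racing O_DIRECT users are excluded while the page
 * cache is in use.  The function name my_fs_read_iter() and the use of
 * generic_file_read_iter() here are assumptions for illustration:
 *
 *	static ssize_t my_fs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_read(inode);
 *		if (ret < 0)
 *			return ret;
 *		ret = generic_file_read_iter(iocb, iter);
 *		netfs_end_io_read(inode);
 *		return ret;
 *	}
 */
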
/**
 * netfs_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.
 */
int netfs_start_io_write(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);

	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (netfs_block_o_direct(ictx) < 0) {
		up_write(&inode->i_rwsem);
		return -ERESTARTSYS;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_write);

/**
 * netfs_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the
 * lock on inode->i_rwsem.
 */
void netfs_end_io_write(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_write);

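/*
 * Example (illustrative sketch only): a buffered ->write_iter() could take
 * the write helpers in the same way.  Note that netfs_start_io_write()
 * downgrades to a shared lock before returning, which is why
 * netfs_end_io_write() drops a read lock.  The function name
 * my_fs_write_iter() and the direct call to generic_perform_write() are
 * assumptions for illustration:
 *
 *	static ssize_t my_fs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_write(inode);
 *		if (ret < 0)
 *			return ret;
 *		ret = generic_perform_write(iocb, iter);
 *		netfs_end_io_write(inode);
 *		return ret;
 *	}
 */
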
/* Call with exclusively locked inode->i_rwsem */
static int netfs_block_buffered(struct inode *inode)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
		set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
		if (inode->i_mapping->nrpages != 0) {
			unmap_mapping_range(inode->i_mapping, 0, 0, 0);
			ret = filemap_fdatawait(inode->i_mapping);
			if (ret < 0) {
				clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * netfs_start_io_direct - declare the file is being used for direct i/o
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * NETFS_ICTX_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
 */
int netfs_start_io_direct(struct inode *inode)
	__acquires(inode->i_rwsem)
{
	struct netfs_inode *ictx = netfs_inode(inode);
	int ret;

	/* Be an optimist! */
	if (down_read_interruptible(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
		return 0;
	up_read(&inode->i_rwsem);

	/* Slow path.... */
	if (down_write_killable(&inode->i_rwsem) < 0)
		return -ERESTARTSYS;
	ret = netfs_block_buffered(inode);
	if (ret < 0) {
		up_write(&inode->i_rwsem);
		return ret;
	}
	downgrade_write(&inode->i_rwsem);
	return 0;
}
EXPORT_SYMBOL(netfs_start_io_direct);

/**
 * netfs_end_io_direct - declare that the direct i/o operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void netfs_end_io_direct(struct inode *inode)
	__releases(inode->i_rwsem)
{
	up_read(&inode->i_rwsem);
}
EXPORT_SYMBOL(netfs_end_io_direct);

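/*
 * Example (illustrative sketch only): a direct I/O path could use the direct
 * helpers to flush the page cache and exclude buffered I/O for the duration
 * of the operation.  The function name my_fs_direct_read() and the
 * hypothetical my_fs_issue_dio() helper are assumptions for illustration:
 *
 *	static ssize_t my_fs_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		ret = netfs_start_io_direct(inode);
 *		if (ret < 0)
 *			return ret;
 *		ret = my_fs_issue_dio(iocb, iter);
 *		netfs_end_io_direct(inode);
 *		return ret;
 *	}
 */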