/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
1da177e4 | 18 | #include "xfs.h" |
dda35b8f | 19 | #include "xfs_fs.h" |
a844f451 | 20 | #include "xfs_bit.h" |
1da177e4 | 21 | #include "xfs_log.h" |
a844f451 | 22 | #include "xfs_inum.h" |
1da177e4 | 23 | #include "xfs_sb.h" |
a844f451 | 24 | #include "xfs_ag.h" |
1da177e4 | 25 | #include "xfs_trans.h" |
1da177e4 LT |
26 | #include "xfs_mount.h" |
27 | #include "xfs_bmap_btree.h" | |
1da177e4 | 28 | #include "xfs_alloc.h" |
1da177e4 LT |
29 | #include "xfs_dinode.h" |
30 | #include "xfs_inode.h" | |
fd3200be | 31 | #include "xfs_inode_item.h" |
dda35b8f | 32 | #include "xfs_bmap.h" |
1da177e4 | 33 | #include "xfs_error.h" |
739bfb2a | 34 | #include "xfs_vnodeops.h" |
f999a5bf | 35 | #include "xfs_da_btree.h" |
ddcd856d | 36 | #include "xfs_ioctl.h" |
dda35b8f | 37 | #include "xfs_trace.h" |
1da177e4 LT |
38 | |
39 | #include <linux/dcache.h> | |
2fe17c10 | 40 | #include <linux/falloc.h> |
1da177e4 | 41 | |
f0f37e2f | 42 | static const struct vm_operations_struct xfs_file_vm_ops; |
1da177e4 | 43 | |
/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

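/*
 * A minimal usage sketch (not part of the original source): the read and
 * write paths below take the IO lock shared, upgrade to exclusive only when
 * the page cache must be invalidated, then demote again before doing the IO:
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 *	if (need_invalidation) {
 *		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 *		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 *		... invalidate page cache ...
 *		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	}
 *	... do the IO ...
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 */
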
/*
 * xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

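	/*
	 * status is zero on success or the negative errno returned by
	 * pagecache_write_begin(); negate it so callers see the positive
	 * error convention used by xfs_zero_last_block() and xfs_zero_eof().
	 */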
	return (-status);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

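/*
 * Regular file fsync: write back and wait on the dirty data in the given
 * range, flush the relevant device write cache, then get the inode core
 * changes to disk by either logging the inode or forcing the log up to the
 * last LSN that modified the inode.
 */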
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache the device used for file data
		 * first.  This is to ensure newly written file data make
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		error = xfs_trans_commit(tp, 0);

		lsn = ip->i_itemp->ili_last_lsn;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip))
			lsn = ip->i_itemp->ili_last_lsn;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	if (!error && lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

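/*
 * Read from a file through the generic path. For direct IO the lock is only
 * held exclusively for as long as it takes to flush and invalidate any
 * cached pages over the range; it is demoted to shared for the read itself.
 */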
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

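/*
 * Splice file data into a pipe under the shared IO lock.
 */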
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

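/*
 * Post-write update of the in-core file size. On a successful extending
 * write, push ip->i_size out to the new file position; on a failed write
 * (other than -EFAULT), pull the position back so it never points beyond
 * the current VFS inode size.
 */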
STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
					*ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	if (new_size == ip->i_new_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (new_size == ip->i_new_size)
			ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);
	xfs_aio_write_newsize_update(ip, new_size);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop.  It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	xfs_fsize_t		*new_sizep,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
	*new_sizep = 0;
restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  There is no need to issue zeroing if another in-flight IO
	 * ends at or before this one.  If zeroing is needed and we are
	 * currently holding the iolock shared, we need to update it to
	 * exclusive which involves dropping all locks and relocking to
	 * maintain correct locking order.  If we do this, restart the
	 * function to ensure all checks and values are still valid.
	 */
	if ((ip->i_new_size && *pos > ip->i_new_size) ||
	    (!ip->i_new_size && *pos > ip->i_size)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, ip->i_size);
	}

	/*
	 * If this IO extends beyond EOF, we may need to update ip->i_new_size.
	 * We have already zeroed space beyond EOF (if necessary).  Only update
	 * ip->i_new_size if this IO ends beyond any other in-flight writes.
	 */
	new_size = *pos + *count;
	if (new_size > ip->i_size) {
		if (new_size > ip->i_new_size)
			ip->i_new_size = new_size;
		*new_sizep = new_size;
	}

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	xfs_fsize_t		*new_size,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

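	/*
	 * Example: with 4k filesystem blocks, a 2k write at offset 1k is
	 * misaligned at both ends, so the dio layer must do sub-block
	 * zeroing and the IO has to be serialised below.
	 */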
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, *iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
	if (ret)
		return ret;

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

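/*
 * Buffered writes take the IO lock exclusively. If the first attempt fails
 * with ENOSPC, flush out all dirty pages on the inode in the hope of freeing
 * space and retry the write exactly once.
 */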
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	xfs_fsize_t		*new_size,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	size_t			count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * if we just got an ENOSPC, flush the inode now we aren't holding any
	 * page locks and retry *once*
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

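/*
 * Top-level write entry point: validate the iovec, then dispatch to the
 * direct or buffered write path. Both return with @iolock indicating the
 * locks still held; they are dropped here once the size updates and any
 * O_DSYNC/O_SYNC log forcing are done.
 */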
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;
	xfs_fsize_t		new_size = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &new_size, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &new_size, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error;

		xfs_rw_iunlock(ip, iolock);
		error = xfs_file_fsync(file, pos, end,
				  (file->f_flags & __O_SYNC) ? 0 : 1);
		xfs_rw_ilock(ip, iolock);
		if (error)
			ret = error;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip, new_size);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

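/*
 * fallocate() backend: reserve (or, for FALLOC_FL_PUNCH_HOLE, unreserve)
 * space via the same mechanism as the XFS_IOC_RESVSP/XFS_IOC_UNRESVSP
 * ioctls, then update the inode size afterwards if the range extended the
 * file and FALLOC_FL_KEEP_SIZE was not set.
 */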
STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

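/*
 * Hand a VFS readdir request through to xfs_readdir(), converting the
 * positive XFS error convention back to the negative errnos the VFS expects.
 */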
STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};