/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

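/*
 * Wait until EXT4_I(inode)->i_unwritten drops to zero, i.e. until every
 * outstanding I/O that still has to convert unwritten extents on this inode
 * has completed.  ext4_file_dio_write() relies on this before letting an
 * unaligned AIO request proceed.
 */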
void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}

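/*
 * For example, with a 4 KiB block size (blockmask == 0xfff) a direct AIO
 * write of 1024 bytes at pos == 6144 (inside i_size) ends at 7168; both
 * offsets have block-offset bits set, so the request is reported as
 * unaligned and is serialized by the caller.  (Illustrative numbers only.)
 */
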
static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
		    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	int unaligned_aio = 0;
	ssize_t ret;
	int overwrite = 0;
	size_t length = iov_length(iov, nr_segs);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb))
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		mutex_lock(ext4_aio_mutex(inode));
		ext4_unwritten_wait(inode);
	}

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	blk_start_plug(&plug);

	iocb->private = &overwrite;

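	/*
	 * The address of 'overwrite' is handed to ext4's direct I/O code via
	 * iocb->private.  When the mapping check below shows that the write
	 * is a pure overwrite of already-initialized blocks, that code can
	 * relax its locking.  (Descriptive note; the consumer of this hint
	 * lives elsewhere in ext4, not in this file.)
	 */
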
	/* check whether we do a DIO overwrite or not */
	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
		struct ext4_map_blocks map;
		unsigned int blkbits = inode->i_blkbits;
		int err, len;

		map.m_lblk = pos >> blkbits;
		map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
			- map.m_lblk;
		len = map.m_len;

		err = ext4_map_blocks(NULL, inode, &map, 0);
		/*
		 * 'err == len' means that all of the blocks have been
		 * preallocated, whether they are initialized or not.  To
		 * exclude uninitialized extents, we need to check m_flags;
		 * an initialized extent is indicated either by
		 * 1) EXT4_MAP_MAPPED when we hit the extent cache, or
		 * 2) no flags at all from a real lookup.
		 * So we should check both conditions.
		 */
		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
			overwrite = 1;
	}

	ret = __generic_file_aio_write(iocb, iov, nr_segs);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	blk_finish_plug(&plug);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

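	/*
	 * Note that a write which merely crosses s_bitmap_maxbytes is not
	 * rejected: iov_shorten() above trims the iovec so the write stops
	 * exactly at the limit, and only a write starting at or beyond the
	 * limit fails with -EFBIG.
	 */
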
	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
	else
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	return ret;
}

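/*
 * mmap support: ext4 only needs to override .page_mkwrite, so that blocks
 * can be allocated (and a journal handle started when necessary) before a
 * page of a shared writable mapping is made writable.  Faults, fault-around
 * and nonlinear remapping all use the generic filemap helpers.
 */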
static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because this way SEEK_DATA and
 * SEEK_HOLE can be implemented for block-mapped and extent-mapped files in
 * the same function.  Once the extent status tree is fully implemented, it
 * will track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in [startoff, endoff],
 * because if this range contains an unwritten extent, we report the extent
 * as data or as a hole depending on whether the page cache has data for it.
 */
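/*
 * For example, a range preallocated with fallocate(2) is backed by unwritten
 * extents: SEEK_DATA must skip it unless the page cache holds data for it
 * (say, from a buffered write that has not been flushed yet), in which case
 * that part of the range has to be reported as data.
 */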
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index, end;
	loff_t startoff, endoff, lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;
			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass of the loop and the
			 * offset is not beyond the end offset, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass of the loop and the offset is
		 * smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it will be a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}
			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * The number of pages is less than we asked for: there must
		 * be a hole in the remaining range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

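/*
 * The seek helpers below hand ext4_find_unwritten_pgoff() the unwritten
 * mapping they are currently examining; a non-zero return means the page
 * cache settled the question and *offset holds the resulting offset, so the
 * block-by-block scan can stop.
 */
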
/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);
	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, it is
		 * treated as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);
	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * we will skip this extent.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

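/*
 * Note that SEEK_HOLE always succeeds for an offset inside the file: when no
 * hole is found before EOF, holeoff is clamped to i_size above, matching the
 * lseek(2) convention that every file has an implicit hole at its end.
 */
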
/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

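/*
 * From userspace these paths are reached through lseek(2); an illustrative
 * (not from this file) sparse-file walk looks like:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * lseek() returns -1 with errno set to ENXIO once the requested offset is
 * at or beyond the end of the file.
 */
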
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

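/*
 * Note that .read and .write point at do_sync_read()/do_sync_write(), which
 * wrap the ->aio_read/->aio_write handlers above in a synchronous kiocb, so
 * the synchronous and AIO entry points share a single implementation.
 */
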
const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};