}
}
if (bio) {
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
}
return bdev;
static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
- return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+ struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
+ return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
/*
return err;
}
+ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct bio *bio;
+
+ if (f2fs_encrypted_file(inode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait for the page to be moved by cleaning */
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ return bio;
+ }
+
+ /* This can handle encryption stuff */
+ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+ block_t blkaddr)
+ {
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ return -EFAULT;
+ }
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ return 0;
+ }
+
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn = F2FS_NODE(dn->node_page);
__le32 *addr_array;
+ int base = 0;
+
+ if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+ base = get_extra_isize(dn->inode);
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
- addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
for (; count > 0; dn->ofs_in_node++) {
- block_t blkaddr =
- datablock_addr(dn->node_page, dn->ofs_in_node);
+ block_t blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
if (blkaddr == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
__set_data_blkaddr(dn);
struct page *page;
struct extent_info ei = {0,0,0};
int err;
- struct f2fs_io_info fio = {
- .sbi = F2FS_I_SB(inode),
- .type = DATA,
- .op = REQ_OP_READ,
- .op_flags = op_flags,
- .encrypted_page = NULL,
- };
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return read_mapping_page(mapping, index, NULL);
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
return page;
}
- fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
- fio.page = page;
- err = f2fs_submit_page_bio(&fio);
+ err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
if (err)
goto put_err;
return page;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
- return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+ return (f2fs_encrypted_file(inode) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO);
}
- if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+ if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL);
+ F2FS_GET_BLOCK_DEFAULT, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
return ret;
}
- static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
- {
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct fscrypt_ctx *ctx = NULL;
- struct bio *bio;
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- return ERR_CAST(ctx);
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- }
-
- bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
- return ERR_PTR(-ENOMEM);
- }
- f2fs_target_device(sbi, blkaddr, bio);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
-
- return bio;
- }
-
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_READ))
+ F2FS_GET_BLOCK_DEFAULT))
goto set_error_page;
}
got_it:
bio = NULL;
}
if (bio == NULL) {
- bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
struct inode *inode = fio->page->mapping->host;
gfp_t gfp_flags = GFP_NOFS;
- if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ if (!f2fs_encrypted_file(inode))
return 0;
/* wait for GCed encrypted page writeback */
- f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);
+ f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
}
static int __write_data_page(struct page *page, bool *submitted,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
.encrypted_page = NULL,
.submitted = false,
.need_lock = LOCK_RETRY,
+ .io_type = io_type,
};
trace_f2fs_writepage(page, DATA);
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_data_page(page, NULL, wbc);
+ return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
int ret = 0;
int done = 0;
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = __write_data_page(page, &submitted, wbc);
+ ret = __write_data_page(page, &submitted, wbc, io_type);
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
return ret;
}
- static int f2fs_write_data_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+ int __f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
goto skip_write;
blk_start_plug(&plug);
- ret = f2fs_write_cache_pages(mapping, wbc);
+ ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
if (wbc->sync_mode == WB_SYNC_ALL)
return 0;
}
+ static int f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+ {
+ struct inode *inode = mapping->host;
+
+ return __f2fs_write_data_pages(mapping, wbc,
+ F2FS_I(inode)->cp_task == current ?
+ FS_CP_DATA_IO : FS_DATA_IO);
+ }
+
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
set_new_dnode(&dn, inode, ipage, ipage, 0);
if (f2fs_has_inline_data(inode)) {
- if (pos + len <= MAX_INLINE_DATA) {
+ if (pos + len <= MAX_INLINE_DATA(inode)) {
read_inline_data(page, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ if (f2fs_encrypted_file(inode))
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
- struct bio *bio;
-
- bio = f2fs_grab_bio(inode, blkaddr, 1);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- goto fail;
- }
- bio->bi_opf = REQ_OP_READ;
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- err = -EFAULT;
+ err = f2fs_submit_page_read(inode, page, blkaddr);
+ if (err)
goto fail;
- }
-
- __submit_bio(sbi, bio, DATA);
lock_page(page);
if (unlikely(page->mapping != mapping)) {
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
- if (err > 0)
+ if (err > 0) {
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
+ err);
set_inode_flag(inode, FI_UPDATE_WRITE);
- else if (err < 0)
+ } else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
+ }
}
trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
SetPagePrivate(newpage);
set_page_private(newpage, page_private(page));
- migrate_page_copy(newpage, page);
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+ migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
if (!PageUptodate(page))
SetPageUptodate(page);
+ f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
+
trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
/* fill the page */
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+ if (f2fs_encrypted_file(inode))
+ f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
out_sem:
up_read(&F2FS_I(inode)->i_mmap_sem);
/* if fdatasync is triggered, let's do in-place-update */
if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
set_inode_flag(inode, FI_NEED_IPU);
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ ret = file_write_and_wait_range(file, start, end);
clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
goto sync_nodes;
}
- ret = wait_on_node_pages_writeback(sbi, ino);
- if (ret)
- goto out;
+ /*
+ * If it's an atomic write, it's fine to keep the write ordering. So
+ * here we don't need to wait for node write completion, since we use
+ * the node chain, which serializes node blocks. If one of the node
+ * writes is reordered, we simply see a broken chain, which stops
+ * roll-forward recovery. That means we recover either all or none of
+ * the node blocks covered by the fsync mark.
+ */
+ if (!atomic) {
+ ret = wait_on_node_pages_writeback(sbi, ino);
+ if (ret)
+ goto out;
+ }
/* once recovery info is written, don't need to tack this */
remove_ino_entry(sbi, ino, APPEND_INO);
dn.ofs_in_node++, pgofs++,
data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
if (__found_offset(blkaddr, dirty, pgofs, whence)) {
f2fs_put_dnode(&dn);
struct f2fs_node *raw_node;
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
__le32 *addr;
+ int base = 0;
+
+ if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+ base = get_extra_isize(dn->inode);
raw_node = F2FS_NODE(dn->node_page);
- addr = blkaddr_in_node(raw_node) + ofs;
+ addr = blkaddr_in_node(raw_node) + base + ofs;
for (; count > 0; count--, addr++, dn->ofs_in_node++) {
block_t blkaddr = le32_to_cpu(*addr);
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags;
- flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
if (flags & FS_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
if (flags & FS_COMPR_FL)
done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
dn.ofs_in_node, len);
for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
- *blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ *blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
if (!is_checkpointed_data(sbi, *blkaddr)) {
if (test_opt(sbi, LFS)) {
ADDRS_PER_PAGE(dn.node_page, dst_inode) -
dn.ofs_in_node, len - i);
do {
- dn.data_blkaddr = datablock_addr(dn.node_page,
- dn.ofs_in_node);
+ dn.data_blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
truncate_data_blocks_range(&dn, 1);
if (do_replace[i]) {
int ret;
for (; index < end; index++, dn->ofs_in_node++) {
- if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
+ if (datablock_addr(dn->inode, dn->node_page,
+ dn->ofs_in_node) == NULL_ADDR)
count++;
}
dn->ofs_in_node = ofs_in_node;
for (index = start; index < end; index++, dn->ofs_in_node++) {
- dn->data_blkaddr =
- datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
/*
* reserve_new_blocks will not guarantee entire block
* allocation.
return 0;
}
- #define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
- #define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
-
- static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
+ static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
- if (S_ISDIR(mode))
- return flags;
- else if (S_ISREG(mode))
- return flags & F2FS_REG_FLMASK;
- else
- return flags & F2FS_OTHER_FLMASK;
+ struct inode *inode = file_inode(file);
+
+ /*
+ * If the process doing a transaction crashes, we should roll back.
+ * Otherwise, other readers/writers can see a corrupted database until
+ * all writers close the file. Since this should be done before
+ * dropping the file lock, it needs to happen in ->flush.
+ */
+ if (f2fs_is_atomic_file(inode) &&
+ F2FS_I(inode)->inmem_task == current)
+ drop_inmem_pages(inode);
+ return 0;
}
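
For context, here is a hypothetical userspace sketch of the atomic-write transaction that this ->flush rollback protects. The F2FS_IOC_* numbers are assumptions copied from fs/f2fs/f2fs.h (they are not exported in a uapi header here) and must match the running kernel.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

int main(void)
{
	const char buf[] = "journal record";
	int fd = open("db.f2fs", O_RDWR | O_CREAT, 0600);	/* hypothetical file */

	if (fd < 0 || ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		return 1;

	/*
	 * These writes stay in in-memory pages until the commit below. If
	 * the process dies here, the new f2fs_file_flush() drops them when
	 * the writer's fd is closed, rolling the file back instead of
	 * leaving a half-written transaction visible to other openers.
	 */
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		return 1;

	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)	/* commits and syncs */
		return 1;

	close(fd);
	return 0;
}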
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ unsigned int flags = fi->i_flags &
+ (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
return put_user(flags, (int __user *)arg);
}
+ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
+ {
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned int oldflags;
+
+ /* Is it quota file? Do not allow user to mess with it */
+ if (IS_NOQUOTA(inode))
+ return -EPERM;
+
+ flags = f2fs_mask_flags(inode->i_mode, flags);
+
+ oldflags = fi->i_flags;
+
+ if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
+ flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
+ fi->i_flags = flags;
+
+ if (fi->i_flags & FS_PROJINHERIT_FL)
+ set_inode_flag(inode, FI_PROJ_INHERIT);
+ else
+ clear_inode_flag(inode, FI_PROJ_INHERIT);
+
+ inode->i_ctime = current_time(inode);
+ f2fs_set_inode_flags(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ return 0;
+ }
+
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags;
- unsigned int oldflags;
int ret;
if (!inode_owner_or_capable(inode))
inode_lock(inode);
- /* Is it quota file? Do not allow user to mess with it */
- if (IS_NOQUOTA(inode)) {
- ret = -EPERM;
- goto unlock_out;
- }
-
- flags = f2fs_mask_flags(inode->i_mode, flags);
-
- oldflags = fi->i_flags;
-
- if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
- if (!capable(CAP_LINUX_IMMUTABLE)) {
- ret = -EPERM;
- goto unlock_out;
- }
- }
-
- flags = flags & FS_FL_USER_MODIFIABLE;
- flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
- fi->i_flags = flags;
+ ret = __f2fs_ioc_setflags(inode, flags);
- inode->i_ctime = current_time(inode);
- f2fs_set_inode_flags(inode);
- f2fs_mark_inode_dirty_sync(inode, false);
- unlock_out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret) {
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
goto out;
}
inc_stat:
+ F2FS_I(inode)->inmem_task = current;
stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
out:
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
if (!ret) {
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
stat_dec_atomic_write(inode);
}
} else {
- ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
err_out:
inode_unlock(inode);
f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_METAFLUSH:
- sync_meta_pages(sbi, META, LONG_MAX);
+ sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false);
break;
default:
*/
while (map.m_lblk < pg_end) {
map.m_len = pg_end - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
if (err)
goto out;
do_map:
map.m_len = pg_end - map.m_lblk;
- err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
if (err)
goto clear_out;
return ret;
}
+ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
+ {
+ struct inode *inode = file_inode(filp);
+ u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
+
+ /* Always report atomic-write support; SQLite on Android checks this feature. */
+ sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
+
+ return put_user(sb_feature, (u32 __user *)arg);
+ }
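
A hypothetical userspace sketch of calling this new ioctl follows; the F2FS_IOC_GET_FEATURES number and the F2FS_FEATURE_ATOMIC_WRITE bit are assumptions copied from fs/f2fs/f2fs.h and must match the kernel being run.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/ioctl.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004

int main(int argc, char **argv)
{
	__u32 features = 0;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any regular file on the f2fs mount */
	if (fd < 0 || ioctl(fd, F2FS_IOC_GET_FEATURES, &features) < 0) {
		perror("F2FS_IOC_GET_FEATURES");
		return 1;
	}
	printf("feature mask 0x%x, atomic write: %s\n", features,
	       (features & F2FS_FEATURE_ATOMIC_WRITE) ? "yes" : "no");
	close(fd);
	return 0;
}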
+
+ #ifdef CONFIG_QUOTA
+ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
+ {
+ struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct super_block *sb = sbi->sb;
+ struct dquot *transfer_to[MAXQUOTAS] = {};
+ struct page *ipage;
+ kprojid_t kprojid;
+ int err;
+
+ if (!f2fs_sb_has_project_quota(sb)) {
+ if (projid != F2FS_DEF_PROJID)
+ return -EOPNOTSUPP;
+ else
+ return 0;
+ }
+
+ if (!f2fs_has_extra_attr(inode))
+ return -EOPNOTSUPP;
+
+ kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
+
+ if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
+ return 0;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
+ err = -EPERM;
+ inode_lock(inode);
+
+ /* Is it quota file? Do not allow user to mess with it */
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ ipage = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto out_unlock;
+ }
+
+ if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
+ i_projid)) {
+ err = -EOVERFLOW;
+ f2fs_put_page(ipage, 1);
+ goto out_unlock;
+ }
+ f2fs_put_page(ipage, 1);
+
+ dquot_initialize(inode);
+
+ transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+ if (!IS_ERR(transfer_to[PRJQUOTA])) {
+ err = __dquot_transfer(inode, transfer_to);
+ dqput(transfer_to[PRJQUOTA]);
+ if (err)
+ goto out_dirty;
+ }
+
+ F2FS_I(inode)->i_projid = kprojid;
+ inode->i_ctime = current_time(inode);
+ out_dirty:
+ f2fs_mark_inode_dirty_sync(inode, true);
+ out_unlock:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
+ }
+ #else
+ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
+ {
+ if (projid != F2FS_DEF_PROJID)
+ return -EOPNOTSUPP;
+ return 0;
+ }
+ #endif
+
+ /* Transfer internal flags to xflags */
+ static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
+ {
+ __u32 xflags = 0;
+
+ if (iflags & FS_SYNC_FL)
+ xflags |= FS_XFLAG_SYNC;
+ if (iflags & FS_IMMUTABLE_FL)
+ xflags |= FS_XFLAG_IMMUTABLE;
+ if (iflags & FS_APPEND_FL)
+ xflags |= FS_XFLAG_APPEND;
+ if (iflags & FS_NODUMP_FL)
+ xflags |= FS_XFLAG_NODUMP;
+ if (iflags & FS_NOATIME_FL)
+ xflags |= FS_XFLAG_NOATIME;
+ if (iflags & FS_PROJINHERIT_FL)
+ xflags |= FS_XFLAG_PROJINHERIT;
+ return xflags;
+ }
+
+ #define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
+ FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
+ FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
+
+ /* Flags we can manipulate through FS_IOC_FSSETXATTR */
+ #define F2FS_FL_XFLAG_VISIBLE (FS_SYNC_FL | \
+ FS_IMMUTABLE_FL | \
+ FS_APPEND_FL | \
+ FS_NODUMP_FL | \
+ FS_NOATIME_FL | \
+ FS_PROJINHERIT_FL)
+
+ /* Transfer xflags to internal flags */
+ static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
+ {
+ unsigned long iflags = 0;
+
+ if (xflags & FS_XFLAG_SYNC)
+ iflags |= FS_SYNC_FL;
+ if (xflags & FS_XFLAG_IMMUTABLE)
+ iflags |= FS_IMMUTABLE_FL;
+ if (xflags & FS_XFLAG_APPEND)
+ iflags |= FS_APPEND_FL;
+ if (xflags & FS_XFLAG_NODUMP)
+ iflags |= FS_NODUMP_FL;
+ if (xflags & FS_XFLAG_NOATIME)
+ iflags |= FS_NOATIME_FL;
+ if (xflags & FS_XFLAG_PROJINHERIT)
+ iflags |= FS_PROJINHERIT_FL;
+
+ return iflags;
+ }
+
+ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+ {
+ struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct fsxattr fa;
+
+ memset(&fa, 0, sizeof(struct fsxattr));
+ fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
+ (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));
+
+ if (f2fs_sb_has_project_quota(inode->i_sb))
+ fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
+ fi->i_projid);
+
+ if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+ return -EFAULT;
+ return 0;
+ }
+
+ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
+ {
+ struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct fsxattr fa;
+ unsigned int flags;
+ int err;
+
+ if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
+ return -EFAULT;
+
+ /* Make sure caller has proper permission */
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
+ return -EOPNOTSUPP;
+
+ flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
+ if (f2fs_mask_flags(inode->i_mode, flags) != flags)
+ return -EOPNOTSUPP;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
+ inode_lock(inode);
+ flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
+ (flags & F2FS_FL_XFLAG_VISIBLE);
+ err = __f2fs_ioc_setflags(inode, flags);
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ if (err)
+ return err;
+
+ err = f2fs_ioc_setproject(filp, fa.fsx_projid);
+ if (err)
+ return err;
+
+ return 0;
+ }
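
As a usage note, the project ID handled above is reachable from userspace through the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface declared in <linux/fs.h>. A minimal sketch (file and project ID taken from the command line):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* struct fsxattr, FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR */

int main(int argc, char **argv)
{
	struct fsxattr fa;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <file> <projid>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* Read-modify-write so only the project ID changes. */
	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0) {
		perror("FS_IOC_FSGETXATTR");
		return 1;
	}
	fa.fsx_projid = strtoul(argv[2], NULL, 0);
	if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) < 0) {
		perror("FS_IOC_FSSETXATTR");
		return 1;
	}
	close(fd);
	return 0;
}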
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
return f2fs_ioc_move_range(filp, arg);
case F2FS_IOC_FLUSH_DEVICE:
return f2fs_ioc_flush_device(filp, arg);
+ case F2FS_IOC_GET_FEATURES:
+ return f2fs_ioc_get_features(filp, arg);
+ case F2FS_IOC_FSGETXATTR:
+ return f2fs_ioc_fsgetxattr(filp, arg);
+ case F2FS_IOC_FSSETXATTR:
+ return f2fs_ioc_fssetxattr(filp, arg);
default:
return -ENOTTY;
}
ret = __generic_file_write_iter(iocb, from);
blk_finish_plug(&plug);
clear_inode_flag(inode, FI_NO_PREALLOC);
+
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
}
inode_unlock(inode);
case F2FS_IOC_DEFRAGMENT:
case F2FS_IOC_MOVE_RANGE:
case F2FS_IOC_FLUSH_DEVICE:
+ case F2FS_IOC_GET_FEATURES:
+ case F2FS_IOC_FSGETXATTR:
+ case F2FS_IOC_FSSETXATTR:
break;
default:
return -ENOIOCTLCMD;
.open = f2fs_file_open,
.release = f2fs_release_file,
.mmap = f2fs_file_mmap,
+ .flush = f2fs_file_flush,
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
.unlocked_ioctl = f2fs_ioctl,
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
+ #include <linux/sched/signal.h>
#include "f2fs.h"
#include "segment.h"
#include "node.h"
+ #include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
return result - size + __reverse_ffz(tmp);
}
+ bool need_SSR(struct f2fs_sb_info *sbi)
+ {
+ int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+ int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
+ if (test_opt(sbi, LFS))
+ return false;
+ if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+ return true;
+
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
+ 2 * reserved_sections(sbi));
+ }
+
void register_inmem_page(struct inode *inode, struct page *page)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
struct node_info ni;
trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
-
+ retry:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
+ err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ goto retry;
+ }
err = -EAGAIN;
goto next;
}
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
stat_dec_atomic_write(inode);
}
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
+ .io_type = FS_DATA_IO,
};
pgoff_t last_idx = ULONG_MAX;
int err = 0;
inode_dec_dirty_pages(inode);
remove_dirty_inode(inode);
}
-
+ retry:
fio.page = page;
fio.old_blkaddr = NULL_ADDR;
fio.encrypted_page = NULL;
fio.need_lock = LOCK_DONE;
err = do_write_data_page(&fio);
if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ goto retry;
+ }
unlock_page(page);
break;
}
-
/* record old blkaddr for revoking */
cur->old_addr = fio.old_blkaddr;
last_idx = page->index;
int ret;
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
ret = submit_bio_wait(bio);
bio_put(bio);
if (kthread_should_stop())
return 0;
+ sb_start_intwrite(sbi->sb);
+
if (!llist_empty(&fcc->issue_list)) {
struct flush_cmd *cmd, *next;
int ret;
fcc->dispatch_list = NULL;
}
+ sb_end_intwrite(sbi->sb);
+
wait_event_interruptible(*q,
kthread_should_stop() || !llist_empty(&fcc->issue_list));
goto repeat;
return ret;
}
- if (!atomic_read(&fcc->issing_flush)) {
- atomic_inc(&fcc->issing_flush);
+ if (atomic_inc_return(&fcc->issing_flush) == 1) {
ret = submit_flush_wait(sbi);
atomic_dec(&fcc->issing_flush);
init_completion(&cmd.wait);
- atomic_inc(&fcc->issing_flush);
llist_add(&cmd.llnode, &fcc->issue_list);
- if (!fcc->dispatch_list)
+ /* update issue_list before we wake up issue_flush thread */
+ smp_mb();
+
+ if (waitqueue_active(&fcc->flush_wait_queue))
wake_up(&fcc->flush_wait_queue);
if (fcc->f2fs_issue_flush) {
wait_for_completion(&cmd.wait);
atomic_dec(&fcc->issing_flush);
} else {
- llist_del_all(&fcc->issue_list);
- atomic_set(&fcc->issing_flush, 0);
+ struct llist_node *list;
+
+ list = llist_del_all(&fcc->issue_list);
+ if (!list) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->issing_flush);
+ } else {
+ struct flush_cmd *tmp, *next;
+
+ ret = submit_flush_wait(sbi);
+
+ llist_for_each_entry_safe(tmp, next, list, llnode) {
+ if (tmp == &cmd) {
+ cmd.ret = ret;
+ atomic_dec(&fcc->issing_flush);
+ continue;
+ }
+ tmp->ret = ret;
+ complete(&tmp->wait);
+ }
+ }
}
return cmd.ret;
sentry = get_seg_entry(sbi, segno);
offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
- size = min((unsigned long)(end - blk), max_blocks);
+ if (end < START_BLOCK(sbi, segno + 1))
+ size = GET_BLKOFF_FROM_SEG0(sbi, end);
+ else
+ size = max_blocks;
map = (unsigned long *)(sentry->cur_valid_map);
offset = __find_rev_next_bit(map, size, offset);
f2fs_bug_on(sbi, offset != size);
- blk += size;
+ blk = START_BLOCK(sbi, segno + 1);
}
#endif
}
submit_bio(bio);
list_move_tail(&dc->list, &dcc->wait_list);
__check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+
+ f2fs_update_iostat(sbi, FS_DISCARD, 1);
}
} else {
__remove_discard_cmd(sbi, dc);
return 0;
}
- static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
+ static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, iter = 0;
+ int iter = 0, issued = 0;
+ int i;
+ bool io_interrupted = false;
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi,
!__check_rb_tree_consistence(sbi, &dcc->root));
blk_start_plug(&plug);
- for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ for (i = MAX_PLIST_NUM - 1;
+ i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
pend_list = &dcc->pend_list[i];
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
- if (!issue_cond || is_idle(sbi))
+ /* Hurry up to finish fstrim */
+ if (dcc->pend_list_tag[i] & P_TRIM) {
__submit_discard_cmd(sbi, dc);
- if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
+ issued++;
+
+ if (fatal_signal_pending(current))
+ break;
+ continue;
+ }
+
+ if (!issue_cond) {
+ __submit_discard_cmd(sbi, dc);
+ issued++;
+ continue;
+ }
+
+ if (is_idle(sbi)) {
+ __submit_discard_cmd(sbi, dc);
+ issued++;
+ } else {
+ io_interrupted = true;
+ }
+
+ if (++iter >= DISCARD_ISSUE_RATE)
goto out;
}
+ if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
+ dcc->pend_list_tag[i] &= (~P_TRIM);
}
out:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
+
+ if (!issued && io_interrupted)
+ issued = -1;
+
+ return issued;
+ }
+
+ static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
+ {
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *pend_list;
+ struct discard_cmd *dc, *tmp;
+ int i;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ pend_list = &dcc->pend_list[i];
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+ __remove_discard_cmd(sbi, dc);
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
}
static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
}
}
- /* This comes from f2fs_put_super */
+ /* This comes from f2fs_put_super and f2fs_trim_fs */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
__issue_discard_cmd(sbi, false);
+ __drop_discard_cmd(sbi);
__wait_discard_cmd(sbi, false);
}
+ static void mark_discard_range_all(struct f2fs_sb_info *sbi)
+ {
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ int i;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = 0; i < MAX_PLIST_NUM; i++)
+ dcc->pend_list_tag[i] |= P_TRIM;
+ mutex_unlock(&dcc->cmd_lock);
+ }
+
static int issue_discard_thread(void *data)
{
struct f2fs_sb_info *sbi = data;
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
wait_queue_head_t *q = &dcc->discard_wait_queue;
+ unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ int issued;
set_freezable();
do {
- wait_event_interruptible(*q, kthread_should_stop() ||
- freezing(current) ||
- atomic_read(&dcc->discard_cmd_cnt));
+ wait_event_interruptible_timeout(*q,
+ kthread_should_stop() || freezing(current) ||
+ dcc->discard_wake,
+ msecs_to_jiffies(wait_ms));
if (try_to_freeze())
continue;
if (kthread_should_stop())
return 0;
- __issue_discard_cmd(sbi, true);
- __wait_discard_cmd(sbi, true);
+ if (dcc->discard_wake) {
+ dcc->discard_wake = 0;
+ if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+ mark_discard_range_all(sbi);
+ }
+
+ sb_start_intwrite(sbi->sb);
+
+ issued = __issue_discard_cmd(sbi, true);
+ if (issued) {
+ __wait_discard_cmd(sbi, true);
+ wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ } else {
+ wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
+ }
+
+ sb_end_intwrite(sbi->sb);
- congestion_wait(BLK_RW_SYNC, HZ/50);
} while (!kthread_should_stop());
return 0;
}
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
- struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *head = &dcc->entry_list;
struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
goto find_next;
list_del(&entry->list);
- SM_I(sbi)->dcc_info->nr_discards -= total_len;
+ dcc->nr_discards -= total_len;
kmem_cache_free(discard_entry_slab, entry);
}
- wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
+ wake_up_discard_thread(sbi, false);
}
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
if (!dcc)
return -ENOMEM;
+ dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
INIT_LIST_HEAD(&dcc->entry_list);
- for (i = 0; i < MAX_PLIST_NUM; i++)
+ for (i = 0; i < MAX_PLIST_NUM; i++) {
INIT_LIST_HEAD(&dcc->pend_list[i]);
+ if (i >= dcc->discard_granularity - 1)
+ dcc->pend_list_tag[i] |= P_ACTIVE;
+ }
INIT_LIST_HEAD(&dcc->wait_list);
mutex_init(&dcc->cmd_lock);
atomic_set(&dcc->issued_discard, 0);
struct seg_entry *se;
unsigned int segno, offset;
long int new_vblocks;
+ bool exist;
+ #ifdef CONFIG_F2FS_CHECK_FS
+ bool mir_exist;
+ #endif
segno = GET_SEGNO(sbi, blkaddr);
/* Update valid block bitmap */
if (del > 0) {
- if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
+ exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- if (f2fs_test_and_set_bit(offset,
- se->cur_valid_map_mir))
- f2fs_bug_on(sbi, 1);
- else
- WARN_ON(1);
- #else
+ mir_exist = f2fs_test_and_set_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
+ "when setting bitmap, blk:%u, old bit:%d",
+ blkaddr, exist);
f2fs_bug_on(sbi, 1);
+ }
#endif
+ if (unlikely(exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Bitmap was wrongly set, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks--;
+ del = 0;
}
+
if (f2fs_discard_en(sbi) &&
!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
se->ckpt_valid_blocks++;
}
} else {
- if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
+ exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
- if (!f2fs_test_and_clear_bit(offset,
- se->cur_valid_map_mir))
- f2fs_bug_on(sbi, 1);
- else
- WARN_ON(1);
- #else
+ mir_exist = f2fs_test_and_clear_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error "
+ "when clearing bitmap, blk:%u, old bit:%d",
+ blkaddr, exist);
f2fs_bug_on(sbi, 1);
+ }
#endif
+ if (unlikely(!exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Bitmap was wrongly cleared, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks++;
+ del = 0;
}
+
if (f2fs_discard_en(sbi) &&
f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
* This function always allocates a used segment(from dirty seglist) by SSR
* manner, so it should recover the existing segment information of valid blocks
*/
- static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
+ static void change_curseg(struct f2fs_sb_info *sbi, int type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
curseg->alloc_type = SSR;
__next_free_blkoff(sbi, curseg, 0);
- if (reuse) {
- sum_page = get_sum_page(sbi, new_segno);
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
- memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
- f2fs_put_page(sum_page, 1);
- }
+ sum_page = get_sum_page(sbi, new_segno);
+ sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
+ f2fs_put_page(sum_page, 1);
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
new_curseg(sbi, type, false);
else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
else
new_curseg(sbi, type, false);
schedule();
}
+ /* It's time to issue all the filed discards */
+ mark_discard_range_all(sbi);
+ f2fs_wait_discard_bios(sbi);
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return err;
mutex_unlock(&sit_i->sentry_lock);
- if (page && IS_NODESEG(type))
+ if (page && IS_NODESEG(type)) {
fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+ f2fs_inode_chksum_set(sbi, page);
+ }
+
if (add_list) {
struct f2fs_bio_info *io;
}
}
- void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
+ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+ enum iostat_type io_type)
{
struct f2fs_io_info fio = {
.sbi = sbi,
set_page_writeback(page);
f2fs_submit_page_write(&fio);
+
+ f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}
void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
set_summary(&sum, nid, 0, 0);
do_write_page(&sum, fio);
+
+ f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
}
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
+
+ f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
}
int rewrite_data_page(struct f2fs_io_info *fio)
{
+ int err;
+
fio->new_blkaddr = fio->old_blkaddr;
stat_inc_inplace_blocks(fio->sbi);
- return f2fs_submit_page_bio(fio);
+
+ err = f2fs_submit_page_bio(fio);
+
+ f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+
+ return err;
}
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
}
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
if (recover_curseg) {
if (old_cursegno != curseg->segno) {
curseg->next_segno = old_cursegno;
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
}
curseg->next_blkoff = old_blkoff;
}
}
}
- void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
- block_t blkaddr)
+ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct page *cpage;
TRACE_EVENT(f2fs_background_gc,
- TP_PROTO(struct super_block *sb, long wait_ms,
+ TP_PROTO(struct super_block *sb, unsigned int wait_ms,
unsigned int prefree, unsigned int free),
TP_ARGS(sb, wait_ms, prefree, free),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(long, wait_ms)
+ __field(unsigned int, wait_ms)
__field(unsigned int, prefree)
__field(unsigned int, free)
),
__entry->free = free;
),
- TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
+ TP_printk("dev = (%d,%d), wait_ms = %u, prefree = %u, free = %u",
show_dev(__entry->dev),
__entry->wait_ms,
__entry->prefree,
__entry->free)
);
+ TRACE_EVENT(f2fs_gc_begin,
+
+ TP_PROTO(struct super_block *sb, bool sync, bool background,
+ long long dirty_nodes, long long dirty_dents,
+ long long dirty_imeta, unsigned int free_sec,
+ unsigned int free_seg, int reserved_seg,
+ unsigned int prefree_seg),
+
+ TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+ free_sec, free_seg, reserved_seg, prefree_seg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(bool, sync)
+ __field(bool, background)
+ __field(long long, dirty_nodes)
+ __field(long long, dirty_dents)
+ __field(long long, dirty_imeta)
+ __field(unsigned int, free_sec)
+ __field(unsigned int, free_seg)
+ __field(int, reserved_seg)
+ __field(unsigned int, prefree_seg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->sync = sync;
+ __entry->background = background;
+ __entry->dirty_nodes = dirty_nodes;
+ __entry->dirty_dents = dirty_dents;
+ __entry->dirty_imeta = dirty_imeta;
+ __entry->free_sec = free_sec;
+ __entry->free_seg = free_seg;
+ __entry->reserved_seg = reserved_seg;
+ __entry->prefree_seg = prefree_seg;
+ ),
+
+ TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
+ "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+ "rsv_seg:%d, prefree_seg:%u",
+ show_dev(__entry->dev),
+ __entry->sync,
+ __entry->background,
+ __entry->dirty_nodes,
+ __entry->dirty_dents,
+ __entry->dirty_imeta,
+ __entry->free_sec,
+ __entry->free_seg,
+ __entry->reserved_seg,
+ __entry->prefree_seg)
+ );
+
+ TRACE_EVENT(f2fs_gc_end,
+
+ TP_PROTO(struct super_block *sb, int ret, int seg_freed,
+ int sec_freed, long long dirty_nodes,
+ long long dirty_dents, long long dirty_imeta,
+ unsigned int free_sec, unsigned int free_seg,
+ int reserved_seg, unsigned int prefree_seg),
+
+ TP_ARGS(sb, ret, seg_freed, sec_freed, dirty_nodes, dirty_dents,
+ dirty_imeta, free_sec, free_seg, reserved_seg, prefree_seg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, ret)
+ __field(int, seg_freed)
+ __field(int, sec_freed)
+ __field(long long, dirty_nodes)
+ __field(long long, dirty_dents)
+ __field(long long, dirty_imeta)
+ __field(unsigned int, free_sec)
+ __field(unsigned int, free_seg)
+ __field(int, reserved_seg)
+ __field(unsigned int, prefree_seg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->ret = ret;
+ __entry->seg_freed = seg_freed;
+ __entry->sec_freed = sec_freed;
+ __entry->dirty_nodes = dirty_nodes;
+ __entry->dirty_dents = dirty_dents;
+ __entry->dirty_imeta = dirty_imeta;
+ __entry->free_sec = free_sec;
+ __entry->free_seg = free_seg;
+ __entry->reserved_seg = reserved_seg;
+ __entry->prefree_seg = prefree_seg;
+ ),
+
+ TP_printk("dev = (%d,%d), ret = %d, seg_freed = %d, sec_freed = %d, "
+ "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, "
+ "free_seg:%u, rsv_seg:%d, prefree_seg:%u",
+ show_dev(__entry->dev),
+ __entry->ret,
+ __entry->seg_freed,
+ __entry->sec_freed,
+ __entry->dirty_nodes,
+ __entry->dirty_dents,
+ __entry->dirty_imeta,
+ __entry->free_sec,
+ __entry->free_seg,
+ __entry->reserved_seg,
+ __entry->prefree_seg)
+ );
+
TRACE_EVENT(f2fs_get_victim,
TP_PROTO(struct super_block *sb, int type, int gc_type,
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->target = bio->bi_bdev->bd_dev;
+ __entry->target = bio_dev(bio);
__entry->op = bio_op(bio);
__entry->op_flags = bio->bi_opf;
__entry->type = type;