// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/dir.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/signal.h>
#include <linux/unicode.h>
#include "f2fs.h"
#include "node.h"
#include "acl.h"
#include "xattr.h"
#include <trace/events/f2fs.h>

#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

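/* Number of blocks covered by the directory's i_size, rounded up. */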
static unsigned long dir_blocks(struct inode *inode)
{
	return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
							>> PAGE_SHIFT;
}

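/*
 * Number of hash buckets at @level: it doubles at each level (offset by
 * dir_level) and is capped at MAX_DIR_BUCKETS.
 */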
static unsigned int dir_buckets(unsigned int level, int dir_level)
{
	if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
		return BIT(level + dir_level);
	else
		return MAX_DIR_BUCKETS;
}

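/* Blocks per bucket: two for shallow levels, four for deeper ones. */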
static unsigned int bucket_blocks(unsigned int level)
{
	if (level < MAX_DIR_HASH_DEPTH / 2)
		return 2;
	else
		return 4;
}

#if IS_ENABLED(CONFIG_UNICODE)
/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname)
{
	struct super_block *sb = dir->i_sb;
	unsigned char *buf;
	int len;

	if (IS_CASEFOLDED(dir) &&
	    !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
		buf = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
					GFP_NOFS, false, F2FS_SB(sb));
		if (!buf)
			return -ENOMEM;

		len = utf8_casefold(sb->s_encoding, fname->usr_fname,
					buf, F2FS_NAME_LEN);
		if (len <= 0) {
			kmem_cache_free(f2fs_cf_name_slab, buf);
			if (sb_has_strict_encoding(sb))
				return -EINVAL;
			/* fall back to treating name as opaque byte sequence */
			return 0;
		}
		fname->cf_name.name = buf;
		fname->cf_name.len = len;
	}

	return 0;
}

void f2fs_free_casefolded_name(struct f2fs_filename *fname)
{
	unsigned char *buf = (unsigned char *)fname->cf_name.name;

	if (buf) {
		kmem_cache_free(f2fs_cf_name_slab, buf);
		fname->cf_name.name = NULL;
	}
}
#endif /* CONFIG_UNICODE */

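/*
 * Common tail of f2fs_setup_filename() and f2fs_prepare_lookup(): copy the
 * fscrypt-prepared name into @fname, then compute the casefolded name and
 * the dirhash (unless the hash was already carried by a no-key name).
 * Callers must release @fname with f2fs_free_filename().
 */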
static int __f2fs_setup_filename(const struct inode *dir,
				 const struct fscrypt_name *crypt_name,
				 struct f2fs_filename *fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));

	fname->usr_fname = crypt_name->usr_fname;
	fname->disk_name = crypt_name->disk_name;
#ifdef CONFIG_FS_ENCRYPTION
	fname->crypto_buf = crypt_name->crypto_buf;
#endif
	if (crypt_name->is_nokey_name) {
		/* hash was decoded from the no-key name */
		fname->hash = cpu_to_le32(crypt_name->hash);
	} else {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err) {
			f2fs_free_filename(fname);
			return err;
		}
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

/*
 * Prepare to search for @iname in @dir.  This is similar to
 * fscrypt_setup_filename(), but this also handles computing the casefolded name
 * and the f2fs dirhash if needed, then packing all the information about this
 * filename up into a 'struct f2fs_filename'.
 */
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname)
{
	struct fscrypt_name crypt_name;
	int err;

	err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name);
	if (err)
		return err;

	return __f2fs_setup_filename(dir, &crypt_name, fname);
}

/*
 * Prepare to look up @dentry in @dir.  This is similar to
 * fscrypt_prepare_lookup(), but this also handles computing the casefolded name
 * and the f2fs dirhash if needed, then packing all the information about this
 * filename up into a 'struct f2fs_filename'.
 */
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname)
{
	struct fscrypt_name crypt_name;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &crypt_name);
	if (err)
		return err;

	return __f2fs_setup_filename(dir, &crypt_name, fname);
}

void f2fs_free_filename(struct f2fs_filename *fname)
{
#ifdef CONFIG_FS_ENCRYPTION
	kfree(fname->crypto_buf.name);
	fname->crypto_buf.name = NULL;
#endif
	f2fs_free_casefolded_name(fname);
}

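/* Block offset, within the directory file, of bucket @idx at @level. */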
static unsigned long dir_block_index(unsigned int level,
				     int dir_level, unsigned int idx)
{
	unsigned long i;
	unsigned long bidx = 0;

	for (i = 0; i < level; i++)
		bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
	bidx += idx * bucket_blocks(level);
	return bidx;
}

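/* Search a single dentry block for @fname; see f2fs_find_target_dentry(). */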
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
				struct page *dentry_page,
				const struct f2fs_filename *fname,
				int *max_slots)
{
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);

	make_dentry_ptr_block(dir, &d, dentry_blk);
	return f2fs_find_target_dentry(&d, fname, max_slots);
}

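/*
 * Compare an on-disk name against @fname, handling the casefolded and
 * encrypted cases.  Returns a positive value on match, 0 on mismatch, or a
 * negative errno.
 */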
static inline int f2fs_match_name(const struct inode *dir,
				  const struct f2fs_filename *fname,
				  const u8 *de_name, u32 de_name_len)
{
	struct fscrypt_name f;

#if IS_ENABLED(CONFIG_UNICODE)
	if (fname->cf_name.name)
		return generic_ci_match(dir, fname->usr_fname,
					&fname->cf_name,
					de_name, de_name_len);
#endif

	f.usr_fname = fname->usr_fname;
	f.disk_name = fname->disk_name;
#ifdef CONFIG_FS_ENCRYPTION
	f.crypto_buf = fname->crypto_buf;
#endif
	return fscrypt_match_name(&f, de_name, de_name_len);
}

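/*
 * Scan the dentries described by @d for @fname.  Returns the matching entry,
 * NULL if none is found, or an ERR_PTR() if name comparison fails.
 * @max_slots, if non-NULL, reports the longest run of free slots seen.
 */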
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots)
{
	struct f2fs_dir_entry *de;
	unsigned long bit_pos = 0;
	int max_len = 0;
	int res = 0;

	if (max_slots)
		*max_slots = 0;
	while (bit_pos < d->max) {
		if (!test_bit_le(bit_pos, d->bitmap)) {
			bit_pos++;
			max_len++;
			continue;
		}

		de = &d->dentry[bit_pos];

		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		if (de->hash_code == fname->hash) {
			res = f2fs_match_name(d->inode, fname,
					      d->filename[bit_pos],
					      le16_to_cpu(de->name_len));
			if (res < 0)
				return ERR_PTR(res);
			if (res)
				goto found;
		}

		if (max_slots && max_len > *max_slots)
			*max_slots = max_len;
		max_len = 0;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}

	de = NULL;
found:
	if (max_slots && max_len > *max_slots)
		*max_slots = max_len;
	return de;
}

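/*
 * Look up @fname in the hash bucket it maps to at @level.  If the entry is
 * not found but the level still had room, remember the hash and level in
 * chash/clevel so that a following create of the same name can start here.
 */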
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
					unsigned int level,
					const struct f2fs_filename *fname,
					struct page **res_page)
{
	int s = GET_DENTRY_SLOTS(fname->disk_name.len);
	unsigned int nbucket, nblock;
	unsigned int bidx, end_block;
	struct page *dentry_page;
	struct f2fs_dir_entry *de = NULL;
	pgoff_t next_pgofs;
	bool room = false;
	int max_slots;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
			       le32_to_cpu(fname->hash) % nbucket);
	end_block = bidx + nblock;

	while (bidx < end_block) {
		/* no need to allocate new dentry pages to all the indices */
		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			if (PTR_ERR(dentry_page) == -ENOENT) {
				room = true;
				bidx = next_pgofs;
				continue;
			} else {
				*res_page = dentry_page;
				break;
			}
		}

		de = find_in_block(dir, dentry_page, fname, &max_slots);
		if (IS_ERR(de)) {
			*res_page = ERR_CAST(de);
			de = NULL;
			break;
		} else if (de) {
			*res_page = dentry_page;
			break;
		}

		if (max_slots >= s)
			room = true;
		f2fs_put_page(dentry_page, 0);

		bidx++;
	}

	if (!de && room && F2FS_I(dir)->chash != fname->hash) {
		F2FS_I(dir)->chash = fname->hash;
		F2FS_I(dir)->clevel = level;
	}

	return de;
}

struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
					 const struct f2fs_filename *fname,
					 struct page **res_page)
{
	unsigned long npages = dir_blocks(dir);
	struct f2fs_dir_entry *de = NULL;
	unsigned int max_depth;
	unsigned int level;

	*res_page = NULL;

	if (f2fs_has_inline_dentry(dir)) {
		de = f2fs_find_in_inline_dir(dir, fname, res_page);
		goto out;
	}

	if (npages == 0)
		goto out;

	max_depth = F2FS_I(dir)->i_current_depth;
	if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
		f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u",
			  dir->i_ino, max_depth);
		max_depth = MAX_DIR_HASH_DEPTH;
		f2fs_i_depth_write(dir, max_depth);
	}

	for (level = 0; level < max_depth; level++) {
		de = find_in_level(dir, level, fname, res_page);
		if (de || IS_ERR(*res_page))
			break;
	}
out:
	/* This is to increase the speed of f2fs_create */
	if (!de)
		F2FS_I(dir)->task = current;
	return de;
}

/*
 * Find an entry in the specified directory with the wanted name.
 * It returns the page where the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page)
{
	struct f2fs_dir_entry *de = NULL;
	struct f2fs_filename fname;
	int err;

	err = f2fs_setup_filename(dir, child, 1, &fname);
	if (err) {
		if (err == -ENOENT)
			*res_page = NULL;
		else
			*res_page = ERR_PTR(err);
		return NULL;
	}

	de = __f2fs_find_entry(dir, &fname, res_page);

	f2fs_free_filename(&fname);
	return de;
}

struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
{
	return f2fs_find_entry(dir, &dotdot_name, p);
}

ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			 struct page **page)
{
	ino_t res = 0;
	struct f2fs_dir_entry *de;

	de = f2fs_find_entry(dir, qstr, page);
	if (de) {
		res = le32_to_cpu(de->ino);
		f2fs_put_page(*page, 0);
	}

	return res;
}

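/* Repoint an existing dentry at @inode; used by rename. */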
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		struct page *page, struct inode *inode)
{
	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, type, true, true);
	de->ino = cpu_to_le32(inode->i_ino);
	de->file_type = fs_umode_to_ftype(inode->i_mode);
	set_page_dirty(page);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);
	f2fs_put_page(page, 1);
}

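/* Copy the dentry name (and, if needed, its dirhash) into the inode page. */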
static void init_dent_inode(struct inode *dir, struct inode *inode,
			    const struct f2fs_filename *fname,
			    struct page *ipage)
{
	struct f2fs_inode *ri;

	if (!fname) /* tmpfile case? */
		return;

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);

	/* copy name info. to this inode page */
	ri = F2FS_INODE(ipage);
	ri->i_namelen = cpu_to_le32(fname->disk_name.len);
	memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
	if (IS_ENCRYPTED(dir)) {
		file_set_enc_name(inode);
		/*
		 * Roll-forward recovery doesn't have encryption keys available,
		 * so it can't compute the dirhash for encrypted+casefolded
		 * filenames.  Append it to i_name if possible.  Else, disable
		 * roll-forward recovery of the dentry (i.e., make fsync'ing the
		 * file force a checkpoint) by setting LOST_PINO.
		 */
		if (IS_CASEFOLDED(dir)) {
			if (fname->disk_name.len + sizeof(f2fs_hash_t) <=
			    F2FS_NAME_LEN)
				put_unaligned(fname->hash, (f2fs_hash_t *)
					&ri->i_name[fname->disk_name.len]);
			else
				file_lost_pino(inode);
		}
	}
	set_page_dirty(ipage);
}

void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			    struct f2fs_dentry_ptr *d)
{
	struct fscrypt_str dot = FSTR_INIT(".", 1);
	struct fscrypt_str dotdot = FSTR_INIT("..", 2);

	/* update dirent of "." */
	f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);

	/* update dirent of ".." */
	f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
}

static int make_empty_dir(struct inode *inode,
			  struct inode *parent, struct page *page)
{
	struct page *dentry_page;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr d;

	if (f2fs_has_inline_dentry(inode))
		return f2fs_make_empty_inline_dir(inode, parent, page);

	dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
	if (IS_ERR(dentry_page))
		return PTR_ERR(dentry_page);

	dentry_blk = page_address(dentry_page);

	make_dentry_ptr_block(NULL, &d, dentry_blk);
	f2fs_do_make_empty_dir(inode, parent, &d);

	set_page_dirty(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return 0;
}

struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage)
{
	struct page *page;
	int err;

	if (is_inode_flag_set(inode, FI_NEW_INODE)) {
		page = f2fs_new_inode_page(inode);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			/* in order to handle error case */
			get_page(page);
			err = make_empty_dir(inode, dir, page);
			if (err) {
				lock_page(page);
				goto put_error;
			}
			put_page(page);
		}

		err = f2fs_init_acl(inode, dir, page, dpage);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir,
					 fname ? fname->usr_fname : NULL, page);
		if (err)
			goto put_error;

		if (IS_ENCRYPTED(inode)) {
			err = fscrypt_set_context(inode, page);
			if (err)
				goto put_error;
		}
	} else {
		page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
		if (IS_ERR(page))
			return page;
	}

	init_dent_inode(dir, inode, fname, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(inode, FI_INC_LINK)) {
		if (!S_ISDIR(inode->i_mode))
			file_lost_pino(inode);
		/*
		 * If link the tmpfile to alias through linkat path,
		 * we should remove this inode from orphan list.
		 */
		if (inode->i_nlink == 0)
			f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
		f2fs_i_links_write(inode, true);
	}
	return page;

put_error:
	clear_nlink(inode);
	f2fs_update_inode(inode, page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
				 unsigned int current_depth)
{
	if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
		if (S_ISDIR(inode->i_mode))
			f2fs_i_links_write(dir, true);
		clear_inode_flag(inode, FI_NEW_INODE);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (F2FS_I(dir)->i_current_depth != current_depth)
		f2fs_i_depth_write(dir, current_depth);

	if (inode && is_inode_flag_set(inode, FI_INC_LINK))
		clear_inode_flag(inode, FI_INC_LINK);
}

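/*
 * Find the first run of at least @slots free dentry slots in @bitmap.
 * Returns the starting slot of such a run, or @max_slots if none exists.
 */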
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots)
{
	int bit_start = 0;
	int zero_start, zero_end;
next:
	zero_start = find_next_zero_bit_le(bitmap, max_slots, bit_start);
	if (zero_start >= max_slots)
		return max_slots;

	zero_end = find_next_bit_le(bitmap, max_slots, zero_start);
	if (zero_end - zero_start >= slots)
		return zero_start;

	bit_start = zero_end + 1;

	if (zero_end + 1 >= max_slots)
		return max_slots;
	goto next;
}

bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			  const struct f2fs_filename *fname)
{
	struct f2fs_dentry_ptr d;
	unsigned int bit_pos;
	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);

	make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);

	return bit_pos < d.max;
}

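/* Write one dentry at @bit_pos and mark all of its slots in the bitmap. */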
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos)
{
	struct f2fs_dir_entry *de;
	int slots = GET_DENTRY_SLOTS(name->len);
	int i;

	de = &d->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(name->len);
	memcpy(d->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(ino);
	de->file_type = fs_umode_to_ftype(mode);
	for (i = 0; i < slots; i++) {
		__set_bit_le(bit_pos + i, (void *)d->bitmap);
		/* avoid wrong garbage data for readdir */
		if (i)
			(de + i)->name_len = 0;
	}
}

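/*
 * Insert @fname into a non-inline directory, adding one more hash level
 * whenever the target buckets at the current depth are full.
 */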
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			   struct inode *inode, nid_t ino, umode_t mode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	unsigned int nbucket, nblock;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	struct page *page = NULL;
	int slots, err = 0;

	level = 0;
	slots = GET_DENTRY_SLOTS(fname->disk_name.len);

	current_depth = F2FS_I(dir)->i_current_depth;
	if (F2FS_I(dir)->chash == fname->hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
		return -ENOSPC;

	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
				(le32_to_cpu(fname->hash) % nbucket));

	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page))
			return PTR_ERR(dentry_page);

		dentry_blk = page_address(dentry_page);
		bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_DENTRY_IN_BLOCK);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);

	if (inode) {
		f2fs_down_write(&F2FS_I(inode)->i_sem);
		page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	make_dentry_ptr_block(NULL, &d, dentry_blk);
	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
			   bit_pos);

	set_page_dirty(dentry_page);

	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);

		/* synchronize inode page's data from inode cache */
		if (is_inode_flag_set(inode, FI_NEW_INODE))
			f2fs_update_inode(inode, page);

		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, current_depth);
fail:
	if (inode)
		f2fs_up_write(&F2FS_I(inode)->i_sem);

	f2fs_put_page(dentry_page, 1);

	return err;
}

int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
		    struct inode *inode, nid_t ino, umode_t mode)
{
	int err = -EAGAIN;

	if (f2fs_has_inline_dentry(dir)) {
		/*
		 * Should get i_xattr_sem to keep the lock order:
		 * i_xattr_sem -> inode_page lock used by f2fs_setxattr.
		 */
		f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
		err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
		f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
	}
	if (err == -EAGAIN)
		err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);

	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
	return err;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
		     struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_filename fname;
	struct page *page = NULL;
	struct f2fs_dir_entry *de = NULL;
	int err;

	err = f2fs_setup_filename(dir, name, 0, &fname);
	if (err)
		return err;

	/*
	 * An immature stackable filesystem shows a race condition between lookup
	 * and create. If we have same task when doing lookup and create, it's
	 * definitely fine as expected by VFS normally. Otherwise, let's just
	 * verify on-disk dentry one more time, which guarantees filesystem
	 * consistency more.
	 */
	if (current != F2FS_I(dir)->task) {
		de = __f2fs_find_entry(dir, &fname, &page);
		F2FS_I(dir)->task = NULL;
	}
	if (de) {
		f2fs_put_page(page, 0);
		err = -EEXIST;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
	}
	f2fs_free_filename(&fname);
	return err;
}

int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
		    struct f2fs_filename *fname)
{
	struct page *page;
	int err = 0;

	f2fs_down_write(&F2FS_I(inode)->i_sem);
	page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}
	f2fs_put_page(page, 1);

	clear_inode_flag(inode, FI_NEW_INODE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
fail:
	f2fs_up_write(&F2FS_I(inode)->i_sem);
	return err;
}

void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);

	f2fs_down_write(&F2FS_I(inode)->i_sem);

	if (S_ISDIR(inode->i_mode))
		f2fs_i_links_write(dir, false);
	inode_set_ctime_current(inode);

	f2fs_i_links_write(inode, false);
	if (S_ISDIR(inode->i_mode)) {
		f2fs_i_links_write(inode, false);
		f2fs_i_size_write(inode, 0);
	}
	f2fs_up_write(&F2FS_I(inode)->i_sem);

	if (inode->i_nlink == 0)
		f2fs_add_orphan_inode(inode);
	else
		f2fs_release_orphan_inode(sbi);
}

/*
 * It only removes the dentry from the dentry page, corresponding name
 * entry in name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	int i;

	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);

	if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT)
		f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);

	if (f2fs_has_inline_dentry(dir))
		return f2fs_delete_inline_entry(dentry, page, dir, inode);

	lock_page(page);
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	dentry_blk = page_address(page);
	bit_pos = dentry - dentry_blk->dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	set_page_dirty(page);

	if (bit_pos == NR_DENTRY_IN_BLOCK &&
		!f2fs_truncate_hole(dir, page->index, page->index + 1)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		clear_page_private_all(page);

		inode_dec_dirty_pages(dir);
		f2fs_remove_dirty_inode(dir);
	}
	f2fs_put_page(page, 1);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}

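/* Return true if the directory contains nothing but "." and "..". */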
bool f2fs_empty_dir(struct inode *dir)
{
	unsigned long bidx = 0;
	struct page *dentry_page;
	unsigned int bit_pos;
	struct f2fs_dentry_block *dentry_blk;
	unsigned long nblock = dir_blocks(dir);

	if (f2fs_has_inline_dentry(dir))
		return f2fs_empty_inline_dir(dir);

	while (bidx < nblock) {
		pgoff_t next_pgofs;

		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			if (PTR_ERR(dentry_page) == -ENOENT) {
				bidx = next_pgofs;
				continue;
			} else {
				return false;
			}
		}

		dentry_blk = page_address(dentry_page);
		if (bidx == 0)
			bit_pos = 2;
		else
			bit_pos = 0;
		bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
						NR_DENTRY_IN_BLOCK,
						bit_pos);

		f2fs_put_page(dentry_page, 0);

		if (bit_pos < NR_DENTRY_IN_BLOCK)
			return false;

		bidx++;
	}
	return true;
}

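/*
 * Emit the valid dentries described by @d into @ctx, starting at @start_pos.
 * Returns 0 when all slots have been scanned, 1 if dir_emit() asked us to
 * stop, or a negative errno on corruption or decryption failure.
 */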
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr)
{
	unsigned char d_type = DT_UNKNOWN;
	unsigned int bit_pos;
	struct f2fs_dir_entry *de = NULL;
	struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
	struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
	struct blk_plug plug;
	bool readdir_ra = sbi->readdir_ra;
	bool found_valid_dirent = false;
	int err = 0;

	bit_pos = ((unsigned long)ctx->pos % d->max);

	if (readdir_ra)
		blk_start_plug(&plug);

	while (bit_pos < d->max) {
		bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
		if (bit_pos >= d->max)
			break;

		de = &d->dentry[bit_pos];
		if (de->name_len == 0) {
			if (found_valid_dirent || !bit_pos) {
				f2fs_warn_ratelimited(sbi,
					"invalid namelen(0), ino:%u, run fsck to fix.",
					le32_to_cpu(de->ino));
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
			bit_pos++;
			ctx->pos = start_pos + bit_pos;
			continue;
		}

		d_type = fs_ftype_to_dtype(de->file_type);

		de_name.name = d->filename[bit_pos];
		de_name.len = le16_to_cpu(de->name_len);

		/* check memory boundary before moving forward */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
		if (unlikely(bit_pos > d->max ||
				le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
			f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.",
				  __func__, le16_to_cpu(de->name_len));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_CORRUPTED_DIRENT);
			goto out;
		}

		if (IS_ENCRYPTED(d->inode)) {
			int save_len = fstr->len;

			err = fscrypt_fname_disk_to_usr(d->inode,
						(u32)le32_to_cpu(de->hash_code),
						0, &de_name, fstr);
			if (err)
				goto out;

			de_name = *fstr;
			fstr->len = save_len;
		}

		if (!dir_emit(ctx, de_name.name, de_name.len,
					le32_to_cpu(de->ino), d_type)) {
			err = 1;
			goto out;
		}

		if (readdir_ra)
			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));

		ctx->pos = start_pos + bit_pos;
		found_valid_dirent = true;
	}
out:
	if (readdir_ra)
		blk_finish_plug(&plug);
	return err;
}

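/* ->iterate_shared(): emit directory entries block by block. */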
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	unsigned long npages = dir_blocks(inode);
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct page *dentry_page = NULL;
	struct file_ra_state *ra = &file->f_ra;
	loff_t start_pos = ctx->pos;
	unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
	struct f2fs_dentry_ptr d;
	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
	int err = 0;

	if (IS_ENCRYPTED(inode)) {
		err = fscrypt_prepare_readdir(inode);
		if (err)
			goto out;

		err = fscrypt_fname_alloc_buffer(F2FS_NAME_LEN, &fstr);
		if (err < 0)
			goto out;
	}

	if (f2fs_has_inline_dentry(inode)) {
		err = f2fs_read_inline_dir(file, ctx, &fstr);
		goto out_free;
	}

	for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) {
		pgoff_t next_pgofs;

		/* allow readdir() to be interrupted */
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_free;
		}
		cond_resched();

		/* readahead for multi pages of dir */
		if (npages - n > 1 && !ra_has_index(ra, n))
			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));

		dentry_page = f2fs_find_data_page(inode, n, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			err = PTR_ERR(dentry_page);
			if (err == -ENOENT) {
				err = 0;
				n = next_pgofs;
				continue;
			} else {
				goto out_free;
			}
		}

		dentry_blk = page_address(dentry_page);

		make_dentry_ptr_block(inode, &d, dentry_blk);

		err = f2fs_fill_dentries(ctx, &d,
				n * NR_DENTRY_IN_BLOCK, &fstr);
		if (err) {
			f2fs_put_page(dentry_page, 0);
			break;
		}

		f2fs_put_page(dentry_page, 0);

		n++;
	}
out_free:
	fscrypt_fname_free_buffer(&fstr);
out:
	trace_f2fs_readdir(inode, start_pos, ctx->pos, err);
	return err < 0 ? err : 0;
}

const struct file_operations f2fs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= f2fs_readdir,
	.fsync		= f2fs_sync_file,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
};