// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/dir.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/signal.h>
#include <linux/unicode.h>
#include "f2fs.h"
#include "node.h"
#include "acl.h"
#include "xattr.h"
#include <trace/events/f2fs.h>

#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

static unsigned long dir_blocks(struct inode *inode)
{
	return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
							>> PAGE_SHIFT;
}

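/*
 * Directory blocks form a multi-level hash table.  Each level is split
 * into dir_buckets() hash buckets and each bucket spans bucket_blocks()
 * blocks.  The bucket count doubles per level until level + dir_level
 * reaches MAX_DIR_HASH_DEPTH / 2, after which it is capped at
 * MAX_DIR_BUCKETS; bucket_blocks() likewise switches from two blocks per
 * bucket to four in the deeper half of the levels.
 */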
static unsigned int dir_buckets(unsigned int level, int dir_level)
{
	if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
		return BIT(level + dir_level);
	else
		return MAX_DIR_BUCKETS;
}

static unsigned int bucket_blocks(unsigned int level)
{
	if (level < MAX_DIR_HASH_DEPTH / 2)
		return 2;
	else
		return 4;
}

#if IS_ENABLED(CONFIG_UNICODE)
/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname)
{
	struct super_block *sb = dir->i_sb;
	unsigned char *buf;
	int len;

	if (IS_CASEFOLDED(dir) &&
	    !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) {
		buf = f2fs_kmem_cache_alloc(f2fs_cf_name_slab,
					GFP_NOFS, false, F2FS_SB(sb));
		if (!buf)
			return -ENOMEM;

		len = utf8_casefold(sb->s_encoding, fname->usr_fname,
				    buf, F2FS_NAME_LEN);
		if (len <= 0) {
			kmem_cache_free(f2fs_cf_name_slab, buf);
			if (sb_has_strict_encoding(sb))
				return -EINVAL;
			/* fall back to treating name as opaque byte sequence */
			return 0;
		}
		fname->cf_name.name = buf;
		fname->cf_name.len = len;
	}

	return 0;
}

void f2fs_free_casefolded_name(struct f2fs_filename *fname)
{
	unsigned char *buf = (unsigned char *)fname->cf_name.name;

	if (buf) {
		kmem_cache_free(f2fs_cf_name_slab, buf);
		fname->cf_name.name = NULL;
	}
}
#endif /* CONFIG_UNICODE */

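/*
 * Fill in a struct f2fs_filename from the fscrypt_name produced by
 * fscrypt_setup_filename()/fscrypt_prepare_lookup().  For a no-key name
 * the dirhash is recovered from the encoded name itself; otherwise the
 * casefolded form (if any) and the dirhash are computed here.
 */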
static int __f2fs_setup_filename(const struct inode *dir,
				 const struct fscrypt_name *crypt_name,
				 struct f2fs_filename *fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));

	fname->usr_fname = crypt_name->usr_fname;
	fname->disk_name = crypt_name->disk_name;
#ifdef CONFIG_FS_ENCRYPTION
	fname->crypto_buf = crypt_name->crypto_buf;
#endif
	if (crypt_name->is_nokey_name) {
		/* hash was decoded from the no-key name */
		fname->hash = cpu_to_le32(crypt_name->hash);
	} else {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err) {
			f2fs_free_filename(fname);
			return err;
		}
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

/*
 * Prepare to search for @iname in @dir.  This is similar to
 * fscrypt_setup_filename(), but this also handles computing the casefolded name
 * and the f2fs dirhash if needed, then packing all the information about this
 * filename up into a 'struct f2fs_filename'.
 */
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname)
{
	struct fscrypt_name crypt_name;
	int err;

	err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name);
	if (err)
		return err;

	return __f2fs_setup_filename(dir, &crypt_name, fname);
}

/*
 * Prepare to look up @dentry in @dir.  This is similar to
 * fscrypt_prepare_lookup(), but this also handles computing the casefolded name
 * and the f2fs dirhash if needed, then packing all the information about this
 * filename up into a 'struct f2fs_filename'.
 */
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname)
{
	struct fscrypt_name crypt_name;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &crypt_name);
	if (err)
		return err;

	return __f2fs_setup_filename(dir, &crypt_name, fname);
}

void f2fs_free_filename(struct f2fs_filename *fname)
{
#ifdef CONFIG_FS_ENCRYPTION
	kfree(fname->crypto_buf.name);
	fname->crypto_buf.name = NULL;
#endif
	f2fs_free_casefolded_name(fname);
}

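/*
 * Return the block offset, within the directory file, where bucket @idx
 * of hash level @level starts: all buckets of the lower levels are
 * skipped first, then @idx buckets of the current level.  For example,
 * with dir_level == 0, level 0 occupies blocks 0-1 and level 1 occupies
 * blocks 2-5, so dir_block_index(2, 0, 3) = 2 + 4 + 3 * 2 = 12.
 */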
static unsigned long dir_block_index(unsigned int level,
				     int dir_level, unsigned int idx)
{
	unsigned long i;
	unsigned long bidx = 0;

	for (i = 0; i < level; i++)
		bidx += mul_u32_u32(dir_buckets(i, dir_level),
				    bucket_blocks(i));
	bidx += idx * bucket_blocks(level);
	return bidx;
}

static struct f2fs_dir_entry *find_in_block(struct inode *dir,
				struct page *dentry_page,
				const struct f2fs_filename *fname,
				int *max_slots)
{
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);

	make_dentry_ptr_block(dir, &d, dentry_blk);
	return f2fs_find_target_dentry(&d, fname, max_slots);
}

static inline int f2fs_match_name(const struct inode *dir,
				  const struct f2fs_filename *fname,
				  const u8 *de_name, u32 de_name_len)
{
	struct fscrypt_name f;

#if IS_ENABLED(CONFIG_UNICODE)
	if (fname->cf_name.name)
		return generic_ci_match(dir, fname->usr_fname,
					&fname->cf_name,
					de_name, de_name_len);
#endif
	f.usr_fname = fname->usr_fname;
	f.disk_name = fname->disk_name;
#ifdef CONFIG_FS_ENCRYPTION
	f.crypto_buf = fname->crypto_buf;
#endif
	return fscrypt_match_name(&f, de_name, de_name_len);
}

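/*
 * Scan one dentry block (or the inline dentry area) for @fname.  Slots
 * are walked via the allocation bitmap; the hash code is compared first
 * and only matching hashes go through the full (possibly encrypted or
 * casefolded) name comparison.  The longest run of free slots seen is
 * reported through @max_slots so the caller knows whether a new entry
 * of that size would still fit in this block.
 */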
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots)
{
	struct f2fs_dir_entry *de;
	unsigned long bit_pos = 0;
	int max_len = 0;
	int res = 0;

	if (max_slots)
		*max_slots = 0;
	while (bit_pos < d->max) {
		if (!test_bit_le(bit_pos, d->bitmap)) {
			bit_pos++;
			max_len++;
			continue;
		}

		de = &d->dentry[bit_pos];

		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		if (de->hash_code == fname->hash) {
			res = f2fs_match_name(d->inode, fname,
					      d->filename[bit_pos],
					      le16_to_cpu(de->name_len));
			if (res < 0)
				return ERR_PTR(res);
			if (res)
				goto found;
		}

		if (max_slots && max_len > *max_slots)
			*max_slots = max_len;
		max_len = 0;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}

	de = NULL;
found:
	if (max_slots && max_len > *max_slots)
		*max_slots = max_len;
	return de;
}

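/*
 * Look up @fname in one hash level.  Only the bucket selected by
 * (hash % nbucket) is read; blocks that were never allocated are skipped
 * via next_pgofs and count as free room.  When the name is not found but
 * the bucket still has room, the (hash, level) pair is cached in the
 * directory inode so that a following create can reuse it.
 */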
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
					    unsigned int level,
					    const struct f2fs_filename *fname,
					    struct page **res_page)
{
	int s = GET_DENTRY_SLOTS(fname->disk_name.len);
	unsigned int nbucket, nblock;
	unsigned int bidx, end_block;
	struct page *dentry_page;
	struct f2fs_dir_entry *de = NULL;
	pgoff_t next_pgofs;
	bool room = false;
	int max_slots;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
			       le32_to_cpu(fname->hash) % nbucket);
	end_block = bidx + nblock;

	while (bidx < end_block) {
		/* no need to allocate new dentry pages to all the indices */
		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			if (PTR_ERR(dentry_page) == -ENOENT) {
				room = true;
				bidx = next_pgofs;
				continue;
			} else {
				*res_page = dentry_page;
				break;
			}
		}

		de = find_in_block(dir, dentry_page, fname, &max_slots);
		if (IS_ERR(de)) {
			*res_page = ERR_CAST(de);
			de = NULL;
			break;
		} else if (de) {
			*res_page = dentry_page;
			break;
		}

		if (max_slots >= s)
			room = true;
		f2fs_put_page(dentry_page, 0);

		bidx++;
	}

	if (!de && room && F2FS_I(dir)->chash != fname->hash) {
		F2FS_I(dir)->chash = fname->hash;
		F2FS_I(dir)->clevel = level;
	}

	return de;
}

struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
					 const struct f2fs_filename *fname,
					 struct page **res_page)
{
	unsigned long npages = dir_blocks(dir);
	struct f2fs_dir_entry *de = NULL;
	unsigned int max_depth;
	unsigned int level;

	*res_page = NULL;

	if (f2fs_has_inline_dentry(dir)) {
		de = f2fs_find_in_inline_dir(dir, fname, res_page);
		goto out;
	}

	if (npages == 0)
		goto out;

	max_depth = F2FS_I(dir)->i_current_depth;
	if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
		f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u",
			  dir->i_ino, max_depth);
		max_depth = MAX_DIR_HASH_DEPTH;
		f2fs_i_depth_write(dir, max_depth);
	}

	for (level = 0; level < max_depth; level++) {
		de = find_in_level(dir, level, fname, res_page);
		if (de || IS_ERR(*res_page))
			break;
	}
out:
	/* This is to increase the speed of f2fs_create */
	if (!de)
		F2FS_I(dir)->task = current;
	return de;
}

/*
 * Find an entry in the specified directory with the wanted name.
 * It returns the page where the entry was found (as a parameter - res_page),
 * and the entry itself. Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page)
{
	struct f2fs_dir_entry *de = NULL;
	struct f2fs_filename fname;
	int err;

	err = f2fs_setup_filename(dir, child, 1, &fname);
	if (err) {
		if (err == -ENOENT)
			*res_page = NULL;
		else
			*res_page = ERR_PTR(err);
		return NULL;
	}

	de = __f2fs_find_entry(dir, &fname, res_page);

	f2fs_free_filename(&fname);
	return de;
}

struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
{
	return f2fs_find_entry(dir, &dotdot_name, p);
}

ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			 struct page **page)
{
	ino_t res = 0;
	struct f2fs_dir_entry *de;

	de = f2fs_find_entry(dir, qstr, page);
	if (de) {
		res = le32_to_cpu(de->ino);
		f2fs_put_page(*page, 0);
	}

	return res;
}

void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		struct page *page, struct inode *inode)
{
	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, type, true, true);
	de->ino = cpu_to_le32(inode->i_ino);
	de->file_type = fs_umode_to_ftype(inode->i_mode);
	set_page_dirty(page);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);
	f2fs_put_page(page, 1);
}

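/*
 * Copy the (possibly encrypted) on-disk name and its length into the
 * child's inode page so that roll-forward recovery can re-create the
 * dentry after an fsync without reading the directory.
 */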
static void init_dent_inode(struct inode *dir, struct inode *inode,
			    const struct f2fs_filename *fname,
			    struct page *ipage)
{
	struct f2fs_inode *ri;

	if (!fname) /* tmpfile case? */
		return;

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);

	/* copy name info. to this inode page */
	ri = F2FS_INODE(ipage);
	ri->i_namelen = cpu_to_le32(fname->disk_name.len);
	memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
	if (IS_ENCRYPTED(dir)) {
		file_set_enc_name(inode);
		/*
		 * Roll-forward recovery doesn't have encryption keys available,
		 * so it can't compute the dirhash for encrypted+casefolded
		 * filenames.  Append it to i_name if possible.  Else, disable
		 * roll-forward recovery of the dentry (i.e., make fsync'ing the
		 * file force a checkpoint) by setting LOST_PINO.
		 */
		if (IS_CASEFOLDED(dir)) {
			if (fname->disk_name.len + sizeof(f2fs_hash_t) <=
			    F2FS_NAME_LEN)
				put_unaligned(fname->hash, (f2fs_hash_t *)
					&ri->i_name[fname->disk_name.len]);
			else
				file_lost_pino(inode);
		}
	}
	set_page_dirty(ipage);
}

void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			    struct f2fs_dentry_ptr *d)
{
	struct fscrypt_str dot = FSTR_INIT(".", 1);
	struct fscrypt_str dotdot = FSTR_INIT("..", 2);

	/* update dirent of "." */
	f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);

	/* update dirent of ".." */
	f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
}

static int make_empty_dir(struct inode *inode,
		struct inode *parent, struct page *page)
{
	struct page *dentry_page;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr d;

	if (f2fs_has_inline_dentry(inode))
		return f2fs_make_empty_inline_dir(inode, parent, page);

	dentry_page = f2fs_get_new_data_page(inode, page, 0, true);
	if (IS_ERR(dentry_page))
		return PTR_ERR(dentry_page);

	dentry_blk = page_address(dentry_page);

	make_dentry_ptr_block(NULL, &d, dentry_blk);
	f2fs_do_make_empty_dir(inode, parent, &d);

	set_page_dirty(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return 0;
}

struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage)
{
	struct page *page;
	int err;

	if (is_inode_flag_set(inode, FI_NEW_INODE)) {
		page = f2fs_new_inode_page(inode);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			/* in order to handle error case */
			get_page(page);
			err = make_empty_dir(inode, dir, page);
			if (err) {
				lock_page(page);
				goto put_error;
			}
			put_page(page);
		}

		err = f2fs_init_acl(inode, dir, page, dpage);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir,
					 fname ? fname->usr_fname : NULL, page);
		if (err)
			goto put_error;

		if (IS_ENCRYPTED(inode)) {
			err = fscrypt_set_context(inode, page);
			if (err)
				goto put_error;
		}
	} else {
		page = f2fs_get_node_page(F2FS_I_SB(dir), inode->i_ino);
		if (IS_ERR(page))
			return page;
	}

	init_dent_inode(dir, inode, fname, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(inode, FI_INC_LINK)) {
		if (!S_ISDIR(inode->i_mode))
			file_lost_pino(inode);
		/*
		 * If link the tmpfile to alias through linkat path,
		 * we should remove this inode from orphan list.
		 */
		if (inode->i_nlink == 0)
			f2fs_remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
		f2fs_i_links_write(inode, true);
	}
	return page;

put_error:
	clear_nlink(inode);
	f2fs_update_inode(inode, page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
						unsigned int current_depth)
{
	if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
		if (S_ISDIR(inode->i_mode))
			f2fs_i_links_write(dir, true);
		clear_inode_flag(inode, FI_NEW_INODE);
	}
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (F2FS_I(dir)->i_current_depth != current_depth)
		f2fs_i_depth_write(dir, current_depth);

	if (inode && is_inode_flag_set(inode, FI_INC_LINK))
		clear_inode_flag(inode, FI_INC_LINK);
}

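/*
 * Find the first run of at least @slots consecutive free slots in a
 * dentry bitmap.  Returns the starting bit of such a run, or @max_slots
 * when the block has no hole big enough.
 */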
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots)
{
	int bit_start = 0;
	int zero_start, zero_end;
next:
	zero_start = find_next_zero_bit_le(bitmap, max_slots, bit_start);
	if (zero_start >= max_slots)
		return max_slots;

	zero_end = find_next_bit_le(bitmap, max_slots, zero_start);
	if (zero_end - zero_start >= slots)
		return zero_start;

	bit_start = zero_end + 1;

	if (zero_end + 1 >= max_slots)
		return max_slots;
	goto next;
}

bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			  const struct f2fs_filename *fname)
{
	struct f2fs_dentry_ptr d;
	unsigned int bit_pos;
	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);

	make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);

	return bit_pos < d.max;
}

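/*
 * Write one directory entry at @bit_pos.  A name longer than one slot's
 * worth of name space spills into the following slots; only the first
 * slot carries the hash, inode number and type, and the name_len of the
 * continuation slots is zeroed so readdir never misparses them as
 * independent entries.
 */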
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos)
{
	struct f2fs_dir_entry *de;
	int slots = GET_DENTRY_SLOTS(name->len);
	int i;

	de = &d->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(name->len);
	memcpy(d->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(ino);
	de->file_type = fs_umode_to_ftype(mode);
	for (i = 0; i < slots; i++) {
		__set_bit_le(bit_pos + i, (void *)d->bitmap);
		/* avoid wrong garbage data for readdir */
		if (i)
			(de + i)->name_len = 0;
	}
}

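/*
 * Insert @fname into the multi-level hash table.  Starting from the
 * cached level (if the preceding lookup found room there), each level's
 * target bucket is searched for a free run of slots; if every block in
 * the bucket is full, the search moves one level deeper, growing
 * i_current_depth as needed up to MAX_DIR_HASH_DEPTH.
 */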
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			   struct inode *inode, nid_t ino, umode_t mode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	unsigned int nbucket, nblock;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	struct page *page = NULL;
	int slots, err = 0;

	level = 0;
	slots = GET_DENTRY_SLOTS(fname->disk_name.len);

	current_depth = F2FS_I(dir)->i_current_depth;
	if (F2FS_I(dir)->chash == fname->hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
		return -ENOSPC;

	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
				(le32_to_cpu(fname->hash) % nbucket));

	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page))
			return PTR_ERR(dentry_page);

		dentry_blk = page_address(dentry_page);
		bit_pos = f2fs_room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_DENTRY_IN_BLOCK);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);

	if (inode) {
		f2fs_down_write(&F2FS_I(inode)->i_sem);
		page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	make_dentry_ptr_block(NULL, &d, dentry_blk);
	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
			   bit_pos);

	set_page_dirty(dentry_page);

	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);

		/* synchronize inode page's data from inode cache */
		if (is_inode_flag_set(inode, FI_NEW_INODE))
			f2fs_update_inode(inode, page);

		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, current_depth);
fail:
	if (inode)
		f2fs_up_write(&F2FS_I(inode)->i_sem);

	f2fs_put_page(dentry_page, 1);

	return err;
}

int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
		    struct inode *inode, nid_t ino, umode_t mode)
{
	int err = -EAGAIN;

	if (f2fs_has_inline_dentry(dir)) {
		/*
		 * Should get i_xattr_sem to keep the lock order:
		 * i_xattr_sem -> inode_page lock used by f2fs_setxattr.
		 */
		f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
		err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
		f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
	}
	if (err == -EAGAIN)
		err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);

	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
	return err;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
				struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_filename fname;
	struct page *page = NULL;
	struct f2fs_dir_entry *de = NULL;
	int err;

	err = f2fs_setup_filename(dir, name, 0, &fname);
	if (err)
		return err;

	/*
	 * An immature stackable filesystem shows a race condition between lookup
	 * and create. If we have same task when doing lookup and create, it's
	 * definitely fine as expected by VFS normally. Otherwise, let's just
	 * verify on-disk dentry one more time, which guarantees filesystem
	 * consistency more.
	 */
	if (current != F2FS_I(dir)->task) {
		de = __f2fs_find_entry(dir, &fname, &page);
		F2FS_I(dir)->task = NULL;
	}
	if (de) {
		f2fs_put_page(page, 0);
		err = -EEXIST;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
	}
	f2fs_free_filename(&fname);
	return err;
}

int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
					struct f2fs_filename *fname)
{
	struct page *page;
	int err = 0;

	f2fs_down_write(&F2FS_I(inode)->i_sem);
	page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}
	f2fs_put_page(page, 1);

	clear_inode_flag(inode, FI_NEW_INODE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
fail:
	f2fs_up_write(&F2FS_I(inode)->i_sem);
	return err;
}

void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);

	f2fs_down_write(&F2FS_I(inode)->i_sem);

	if (S_ISDIR(inode->i_mode))
		f2fs_i_links_write(dir, false);
	inode_set_ctime_current(inode);

	f2fs_i_links_write(inode, false);
	if (S_ISDIR(inode->i_mode)) {
		f2fs_i_links_write(inode, false);
		f2fs_i_size_write(inode, 0);
	}
	f2fs_up_write(&F2FS_I(inode)->i_sem);

	if (inode->i_nlink == 0)
		f2fs_add_orphan_inode(inode);
	else
		f2fs_release_orphan_inode(sbi);
}

/*
 * It only removes the dentry from the dentry page, corresponding name
 * entry in name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	pgoff_t index = page_folio(page)->index;
	int i;

	f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);

	if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT)
		f2fs_add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);

	if (f2fs_has_inline_dentry(dir))
		return f2fs_delete_inline_entry(dentry, page, dir, inode);

	lock_page(page);
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	dentry_blk = page_address(page);
	bit_pos = dentry - dentry_blk->dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	set_page_dirty(page);

	if (bit_pos == NR_DENTRY_IN_BLOCK &&
		!f2fs_truncate_hole(dir, index, index + 1)) {
		f2fs_clear_page_cache_dirty_tag(page_folio(page));
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		clear_page_private_all(page);

		inode_dec_dirty_pages(dir);
		f2fs_remove_dirty_inode(dir);
	}
	f2fs_put_page(page, 1);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}

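/*
 * A directory is empty when no dentry block has a bit set in its
 * allocation bitmap apart from the "." and ".." entries in block 0
 * (slots 0 and 1), which is why the scan of the first block starts at
 * bit 2.
 */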
bool f2fs_empty_dir(struct inode *dir)
{
	unsigned long bidx = 0;
	struct page *dentry_page;
	unsigned int bit_pos;
	struct f2fs_dentry_block *dentry_blk;
	unsigned long nblock = dir_blocks(dir);

	if (f2fs_has_inline_dentry(dir))
		return f2fs_empty_inline_dir(dir);

	while (bidx < nblock) {
		pgoff_t next_pgofs;

		dentry_page = f2fs_find_data_page(dir, bidx, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			if (PTR_ERR(dentry_page) == -ENOENT) {
				bidx = next_pgofs;
				continue;
			} else {
				return false;
			}
		}

		dentry_blk = page_address(dentry_page);
		if (bidx == 0)
			bit_pos = 2;
		else
			bit_pos = 0;
		bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
						NR_DENTRY_IN_BLOCK,
						bit_pos);

		f2fs_put_page(dentry_page, 0);

		if (bit_pos < NR_DENTRY_IN_BLOCK)
			return false;

		bidx++;
	}
	return true;
}

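/*
 * Emit the entries of one dentry block to readdir.  Returns 0 when the
 * whole block was emitted, 1 when dir_emit() asked to stop early, and a
 * negative errno when a corrupted name length is found (in which case
 * the superblock is flagged for fsck).
 */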
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr)
{
	unsigned char d_type = DT_UNKNOWN;
	unsigned int bit_pos;
	struct f2fs_dir_entry *de = NULL;
	struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
	struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
	struct blk_plug plug;
	bool readdir_ra = sbi->readdir_ra;
	bool found_valid_dirent = false;
	int err = 0;

	bit_pos = ((unsigned long)ctx->pos % d->max);

	if (readdir_ra)
		blk_start_plug(&plug);

	while (bit_pos < d->max) {
		bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
		if (bit_pos >= d->max)
			break;

		de = &d->dentry[bit_pos];
		if (de->name_len == 0) {
			if (found_valid_dirent || !bit_pos) {
				f2fs_warn_ratelimited(sbi,
					"invalid namelen(0), ino:%u, run fsck to fix.",
					le32_to_cpu(de->ino));
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
			bit_pos++;
			ctx->pos = start_pos + bit_pos;
			continue;
		}

		d_type = fs_ftype_to_dtype(de->file_type);

		de_name.name = d->filename[bit_pos];
		de_name.len = le16_to_cpu(de->name_len);

		/* check memory boundary before moving forward */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
		if (unlikely(bit_pos > d->max ||
				le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
			f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.",
				  __func__, le16_to_cpu(de->name_len));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_CORRUPTED_DIRENT);
			goto out;
		}

		if (IS_ENCRYPTED(d->inode)) {
			int save_len = fstr->len;

			err = fscrypt_fname_disk_to_usr(d->inode,
						(u32)le32_to_cpu(de->hash_code),
						0, &de_name, fstr);
			if (err)
				goto out;

			de_name = *fstr;
			fstr->len = save_len;
		}

		if (!dir_emit(ctx, de_name.name, de_name.len,
					le32_to_cpu(de->ino), d_type)) {
			err = 1;
			goto out;
		}

		if (readdir_ra)
			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));

		ctx->pos = start_pos + bit_pos;
		found_valid_dirent = true;
	}
out:
	if (readdir_ra)
		blk_finish_plug(&plug);
	return err;
}

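/*
 * ->iterate_shared for f2fs directories.  ctx->pos encodes the position
 * as (block index * NR_DENTRY_IN_BLOCK + slot), so a partially read
 * directory resumes in the middle of a block.  Blocks are read with
 * readahead, and holes reported by f2fs_find_data_page() are skipped
 * via next_pgofs.
 */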
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	unsigned long npages = dir_blocks(inode);
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct page *dentry_page = NULL;
	struct file_ra_state *ra = &file->f_ra;
	loff_t start_pos = ctx->pos;
	unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
	struct f2fs_dentry_ptr d;
	struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
	int err = 0;

	if (IS_ENCRYPTED(inode)) {
		err = fscrypt_prepare_readdir(inode);
		if (err)
			goto out;

		err = fscrypt_fname_alloc_buffer(F2FS_NAME_LEN, &fstr);
		if (err < 0)
			goto out;
	}

	if (f2fs_has_inline_dentry(inode)) {
		err = f2fs_read_inline_dir(file, ctx, &fstr);
		goto out_free;
	}

	for (; n < npages; ctx->pos = n * NR_DENTRY_IN_BLOCK) {
		pgoff_t next_pgofs;

		/* allow readdir() to be interrupted */
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_free;
		}
		cond_resched();

		/* readahead for multi pages of dir */
		if (npages - n > 1 && !ra_has_index(ra, n))
			page_cache_sync_readahead(inode->i_mapping, ra, file, n,
				min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));

		dentry_page = f2fs_find_data_page(inode, n, &next_pgofs);
		if (IS_ERR(dentry_page)) {
			err = PTR_ERR(dentry_page);
			if (err == -ENOENT) {
				err = 0;
				n = next_pgofs;
				continue;
			} else {
				goto out_free;
			}
		}

		dentry_blk = page_address(dentry_page);

		make_dentry_ptr_block(inode, &d, dentry_blk);

		err = f2fs_fill_dentries(ctx, &d,
				n * NR_DENTRY_IN_BLOCK, &fstr);
		if (err) {
			f2fs_put_page(dentry_page, 0);
			break;
		}

		f2fs_put_page(dentry_page, 0);

		n++;
	}
out_free:
	fscrypt_fname_free_buffer(&fstr);
out:
	trace_f2fs_readdir(inode, start_pos, ctx->pos, err);
	return err < 0 ? err : 0;
}

const struct file_operations f2fs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= f2fs_readdir,
	.fsync		= f2fs_sync_file,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
};