1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/buffer_head.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
* ntfs_read_mft - Read an MFT record and parse it to initialize @inode.
23 static struct inode *ntfs_read_mft(struct inode *inode,
24 const struct cpu_str *name,
25 const struct MFT_REF *ref)
28 struct ntfs_inode *ni = ntfs_i(inode);
29 struct super_block *sb = inode->i_sb;
30 struct ntfs_sb_info *sbi = sb->s_fs_info;
32 struct ATTR_STD_INFO5 *std5 = NULL;
33 struct ATTR_LIST_ENTRY *le;
35 bool is_match = false;
38 unsigned long ino = inode->i_ino;
39 u32 rp_fa = 0, asize, t32;
40 u16 roff, rsize, names = 0, links = 0;
41 const struct ATTR_FILE_NAME *fname = NULL;
42 const struct INDEX_ROOT *root;
43 struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
46 struct runs_tree *run;
/* Set up 'uid' and 'gid'. */
51 inode->i_uid = sbi->options->fs_uid;
52 inode->i_gid = sbi->options->fs_gid;
54 err = mi_init(&ni->mi, sbi, ino);
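/*
* Bootstrap case: when reading the record of $MFT itself during mount
* (before sb->s_root exists), prime the first clusters of the $MFT data
* run by hand so that mi_read() below can locate the record on disk.
*/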
58 if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
59 t64 = sbi->mft.lbo >> sbi->cluster_bits;
60 t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
62 init_rwsem(&ni->file.run_lock);
64 if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
70 err = mi_read(&ni->mi, ino == MFT_REC_MFT);
77 if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
79 } else if (ref->seq != rec->seq) {
81 ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
82 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
84 } else if (!is_rec_inuse(rec)) {
86 ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
90 if (le32_to_cpu(rec->total) != sbi->record_size) {
96 if (!is_rec_base(rec)) {
101 /* Record should contain $I30 root. */
102 is_dir = rec->flags & RECORD_FLAG_DIR;
/* MFT_REC_MFT is not a directory. */
105 if (is_dir && ino == MFT_REC_MFT) {
110 inode->i_generation = le16_to_cpu(rec->seq);
/* Enumerate all attributes of the MFT record. */
* To reduce indentation depth, use goto instead of
* while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL)))
123 attr = ni_enum_attr_ex(ni, attr, &le, NULL);
/* This is a non-primary attribute segment. Ignore it unless it is the MFT's $DATA attribute. */
129 if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
133 asize = le32_to_cpu(attr->size);
134 goto attr_unpack_run;
137 roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
138 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
139 asize = le32_to_cpu(attr->size);
* Really this check was already done in 'ni_enum_attr_ex' -> ... 'mi_enum_attr'.
* It is not critical to check this case again here.
145 if (attr->name_len &&
146 sizeof(short) * attr->name_len + le16_to_cpu(attr->name_off) >
151 t64 = le64_to_cpu(attr->nres.alloc_size);
152 if (le64_to_cpu(attr->nres.data_size) > t64 ||
153 le64_to_cpu(attr->nres.valid_size) > t64)
157 switch (attr->type) {
160 asize < sizeof(struct ATTR_STD_INFO) + roff ||
161 rsize < sizeof(struct ATTR_STD_INFO))
167 std5 = Add2Ptr(attr, roff);
170 nt2kernel(std5->cr_time, &ni->i_crtime);
172 nt2kernel(std5->a_time, &ts);
173 inode_set_atime_to_ts(inode, ts);
174 nt2kernel(std5->c_time, &ts);
175 inode_set_ctime_to_ts(inode, ts);
176 nt2kernel(std5->m_time, &ts);
177 inode_set_mtime_to_ts(inode, ts);
179 ni->std_fa = std5->fa;
181 if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
182 rsize >= sizeof(struct ATTR_STD_INFO5))
183 ni->std_security_id = std5->security_id;
187 if (attr->name_len || le || ino == MFT_REC_LOG)
190 err = ntfs_load_attr_list(ni, attr);
199 if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
200 rsize < SIZEOF_ATTRIBUTE_FILENAME)
204 fname = Add2Ptr(attr, roff);
205 if (fname->type == FILE_NAME_DOS)
209 if (name && name->len == fname->name_len &&
210 !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
218 /* Ignore data attribute in dir record. */
222 if (ino == MFT_REC_BADCLUST && !attr->non_res)
225 if (attr->name_len &&
226 ((ino != MFT_REC_BADCLUST || !attr->non_res ||
227 attr->name_len != ARRAY_SIZE(BAD_NAME) ||
228 memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
229 (ino != MFT_REC_SECURE || !attr->non_res ||
230 attr->name_len != ARRAY_SIZE(SDS_NAME) ||
231 memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
/* The file contains a named data stream. Ignore it. */
236 if (is_attr_sparsed(attr))
237 ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
239 ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
241 if (is_attr_compressed(attr))
242 ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
244 ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
246 if (is_attr_encrypted(attr))
247 ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
249 ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
251 if (!attr->non_res) {
252 ni->i_valid = inode->i_size = rsize;
253 inode_set_bytes(inode, rsize);
256 mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
258 if (!attr->non_res) {
259 ni->ni_flags |= NI_FLAG_RESIDENT;
263 inode_set_bytes(inode, attr_ondisk_size(attr));
265 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
266 inode->i_size = le64_to_cpu(attr->nres.data_size);
267 if (!attr->nres.alloc_size)
270 run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run :
278 root = Add2Ptr(attr, roff);
280 if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
281 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
284 if (root->type != ATTR_NAME ||
285 root->rule != NTFS_COLLATION_TYPE_FILENAME)
292 ni->ni_flags |= NI_FLAG_DIR;
294 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
299 (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
304 if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
305 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
308 inode->i_size = le64_to_cpu(attr->nres.data_size);
309 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
310 inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
312 run = &ni->dir.alloc_run;
316 if (ino == MFT_REC_MFT) {
319 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
320 /* 0x20000000 = 2^32 / 8 */
321 if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
324 run = &sbi->mft.bitmap.run;
326 } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
327 !memcmp(attr_name(attr), I30_NAME,
330 run = &ni->dir.bitmap_run;
339 rp_fa = ni_parse_reparse(ni, attr, &rp);
* Assume one UTF-16 character maps to one UTF-8 byte.
346 inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
349 ni->i_valid = inode->i_size;
350 /* Clear directory bit. */
351 if (ni->ni_flags & NI_FLAG_DIR) {
352 indx_clear(&ni->dir);
353 memset(&ni->dir, 0, sizeof(ni->dir));
354 ni->ni_flags &= ~NI_FLAG_DIR;
356 run_close(&ni->file.run);
358 mode = S_IFLNK | 0777;
362 goto attr_unpack_run; // Double break.
366 case REPARSE_COMPRESSED:
369 case REPARSE_DEDUPLICATED:
375 if (!attr->name_len &&
376 resident_data_ex(attr, sizeof(struct EA_INFO))) {
377 ni->ni_flags |= NI_FLAG_EA;
379 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
381 inode->i_mode = mode;
382 ntfs_get_wsl_perm(inode);
383 mode = inode->i_mode;
392 roff = le16_to_cpu(attr->nres.run_off);
399 t64 = le64_to_cpu(attr->nres.svcn);
401 err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
402 t64, Add2Ptr(attr, roff), asize - roff);
413 if (!is_match && name) {
418 if (std5->fa & FILE_ATTRIBUTE_READONLY)
426 if (names != le16_to_cpu(rec->hard_links)) {
427 /* Correct minor error on the fly. Do not mark inode as dirty. */
428 ntfs_inode_warn(inode, "Correct links count -> %u.", names);
429 rec->hard_links = cpu_to_le16(names);
433 set_nlink(inode, links);
436 ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
* Dot and dot-dot should be included in the count but were not
* included in the enumeration.
* Hard links to directories are usually disabled.
443 inode->i_op = &ntfs_dir_inode_operations;
444 inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
445 &ntfs_legacy_dir_operations :
446 &ntfs_dir_operations;
448 } else if (S_ISLNK(mode)) {
449 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
450 inode->i_op = &ntfs_link_inode_operations;
452 inode_nohighmem(inode);
453 } else if (S_ISREG(mode)) {
454 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
455 inode->i_op = &ntfs_file_inode_operations;
456 inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
457 &ntfs_legacy_file_operations :
458 &ntfs_file_operations;
459 inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
461 if (ino != MFT_REC_MFT)
462 init_rwsem(&ni->file.run_lock);
463 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
465 inode->i_op = &ntfs_special_inode_operations;
466 init_special_inode(inode, mode, inode->i_rdev);
467 } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
468 fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
/* Records in $Extend are neither regular files nor general directories. */
470 inode->i_op = &ntfs_file_inode_operations;
476 if ((sbi->options->sys_immutable &&
477 (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
478 !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
479 inode->i_flags |= S_IMMUTABLE;
481 inode->i_flags &= ~S_IMMUTABLE;
484 inode->i_mode = mode;
485 if (!(ni->ni_flags & NI_FLAG_EA)) {
486 /* If no xattr then no security (stored in xattr). */
487 inode->i_flags |= S_NOSEC;
490 if (ino == MFT_REC_MFT && !sb->s_root)
493 unlock_new_inode(inode);
498 if (ino == MFT_REC_MFT && !sb->s_root)
508 * Return: 1 if match.
510 static int ntfs_test_inode(struct inode *inode, void *data)
512 struct MFT_REF *ref = data;
514 return ino_get(ref) == inode->i_ino;
517 static int ntfs_set_inode(struct inode *inode, void *data)
519 const struct MFT_REF *ref = data;
521 inode->i_ino = ino_get(ref);
525 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
526 const struct cpu_str *name)
530 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
532 if (unlikely(!inode))
533 return ERR_PTR(-ENOMEM);
535 /* If this is a freshly allocated inode, need to read it now. */
536 if (inode->i_state & I_NEW)
537 inode = ntfs_read_mft(inode, name, ref);
538 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
539 /* Inode overlaps? */
540 _ntfs_bad_inode(inode);
543 if (IS_ERR(inode) && name)
544 ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
550 GET_BLOCK_GENERAL = 0,
551 GET_BLOCK_WRITE_BEGIN = 1,
552 GET_BLOCK_DIRECT_IO_R = 2,
553 GET_BLOCK_DIRECT_IO_W = 3,
557 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
558 struct buffer_head *bh, int create,
559 enum get_block_ctx ctx)
561 struct super_block *sb = inode->i_sb;
562 struct ntfs_sb_info *sbi = sb->s_fs_info;
563 struct ntfs_inode *ni = ntfs_i(inode);
564 struct folio *folio = bh->b_folio;
565 u8 cluster_bits = sbi->cluster_bits;
566 u32 block_size = sb->s_blocksize;
567 u64 bytes, lbo, valid;
573 /* Clear previous state. */
574 clear_buffer_new(bh);
575 clear_buffer_uptodate(bh);
577 if (is_resident(ni)) {
578 bh->b_blocknr = RESIDENT_LCN;
579 bh->b_size = block_size;
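/* Resident data lives inside the MFT record itself, so there is no real disk block to map. */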
581 /* direct io (read) or bmap call */
585 err = attr_data_read_resident(ni, folio);
589 set_buffer_uptodate(bh);
594 vcn = vbo >> cluster_bits;
595 off = vbo & sbi->cluster_mask;
598 err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL,
599 create && sbi->cluster_size > PAGE_SIZE);
606 bytes = ((u64)len << cluster_bits) - off;
608 if (lcn == SPARSE_LCN) {
610 if (bh->b_size > bytes)
620 lbo = ((u64)lcn << cluster_bits) + off;
622 set_buffer_mapped(bh);
623 bh->b_bdev = sb->s_bdev;
624 bh->b_blocknr = lbo >> sb->s_blocksize_bits;
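/*
* Valid-size handling below: a write past ni->i_valid extends it; a read
* entirely beyond it returns an unmapped (zero-filled) buffer; a read that
* crosses it is performed and the tail beyond the valid size is zeroed.
*/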
628 if (ctx == GET_BLOCK_DIRECT_IO_W) {
629 /* ntfs_direct_IO will update ni->i_valid. */
634 if (bytes > bh->b_size)
640 if (vbo + bytes > valid) {
641 ni->i_valid = vbo + bytes;
642 mark_inode_dirty(inode);
644 } else if (vbo >= valid) {
645 /* Read out of valid data. */
646 clear_buffer_mapped(bh);
647 } else if (vbo + bytes <= valid) {
649 } else if (vbo + block_size <= valid) {
650 /* Normal short read. */
654 * Read across valid size: vbo < valid && valid < vbo + block_size
659 u32 voff = valid - vbo;
661 bh->b_size = block_size;
662 off = vbo & (PAGE_SIZE - 1);
663 folio_set_bh(bh, folio, off);
665 if (bh_read(bh, 0) < 0) {
669 folio_zero_segment(folio, off + voff, off + block_size);
673 if (bh->b_size > bytes)
677 if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
678 static_assert(sizeof(size_t) < sizeof(loff_t));
679 if (bytes > 0x40000000u)
680 bh->b_size = 0x40000000u;
690 int ntfs_get_block(struct inode *inode, sector_t vbn,
691 struct buffer_head *bh_result, int create)
693 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
694 bh_result, create, GET_BLOCK_GENERAL);
697 static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
698 struct buffer_head *bh_result, int create)
700 return ntfs_get_block_vbo(inode,
701 (u64)vsn << inode->i_sb->s_blocksize_bits,
702 bh_result, create, GET_BLOCK_BMAP);
705 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
707 return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
710 static int ntfs_read_folio(struct file *file, struct folio *folio)
713 struct address_space *mapping = folio->mapping;
714 struct inode *inode = mapping->host;
715 struct ntfs_inode *ni = ntfs_i(inode);
717 if (is_resident(ni)) {
719 err = attr_data_read_resident(ni, folio);
721 if (err != E_NTFS_NONRESIDENT) {
727 if (is_compressed(ni)) {
729 err = ni_readpage_cmpr(ni, folio);
734 /* Normal + sparse files. */
735 return mpage_read_folio(folio, ntfs_get_block);
738 static void ntfs_readahead(struct readahead_control *rac)
740 struct address_space *mapping = rac->mapping;
741 struct inode *inode = mapping->host;
742 struct ntfs_inode *ni = ntfs_i(inode);
746 if (is_resident(ni)) {
747 /* No readahead for resident. */
751 if (is_compressed(ni)) {
752 /* No readahead for compressed. */
757 pos = readahead_pos(rac);
759 if (valid < i_size_read(inode) && pos <= valid &&
760 valid < pos + readahead_length(rac)) {
/* The range crosses 'valid'. Read it page by page. */
765 mpage_readahead(rac, ntfs_get_block);
768 static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
769 struct buffer_head *bh_result, int create)
771 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
772 bh_result, create, GET_BLOCK_DIRECT_IO_R);
775 static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
776 struct buffer_head *bh_result, int create)
778 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
779 bh_result, create, GET_BLOCK_DIRECT_IO_W);
782 static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
784 struct file *file = iocb->ki_filp;
785 struct address_space *mapping = file->f_mapping;
786 struct inode *inode = mapping->host;
787 struct ntfs_inode *ni = ntfs_i(inode);
788 loff_t vbo = iocb->ki_pos;
790 int wr = iov_iter_rw(iter) & WRITE;
791 size_t iter_count = iov_iter_count(iter);
795 if (is_resident(ni)) {
796 /* Switch to buffered write. */
801 ret = blockdev_direct_IO(iocb, inode, iter,
802 wr ? ntfs_get_block_direct_IO_W :
803 ntfs_get_block_direct_IO_R);
807 else if (wr && ret == -EIOCBQUEUED)
808 end = vbo + iter_count;
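/*
* Post-process the valid size: a direct write that ended past ni->i_valid
* extends it, while a direct read that crossed it must zero the part of
* the iterator beyond the valid data.
*/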
814 if (end > valid && !S_ISBLK(inode->i_mode)) {
816 mark_inode_dirty(inode);
818 } else if (vbo < valid && valid < end) {
820 iov_iter_revert(iter, end - valid);
821 iov_iter_zero(end - valid, iter);
828 int ntfs_set_size(struct inode *inode, u64 new_size)
830 struct super_block *sb = inode->i_sb;
831 struct ntfs_sb_info *sbi = sb->s_fs_info;
832 struct ntfs_inode *ni = ntfs_i(inode);
835 /* Check for maximum file size. */
836 if (is_sparsed(ni) || is_compressed(ni)) {
837 if (new_size > sbi->maxbytes_sparse) {
841 } else if (new_size > sbi->maxbytes) {
847 down_write(&ni->file.run_lock);
849 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
850 &ni->i_valid, true, NULL);
852 up_write(&ni->file.run_lock);
855 mark_inode_dirty(inode);
861 static int ntfs_resident_writepage(struct folio *folio,
862 struct writeback_control *wbc, void *data)
864 struct address_space *mapping = data;
865 struct inode *inode = mapping->host;
866 struct ntfs_inode *ni = ntfs_i(inode);
869 if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
873 ret = attr_data_write_resident(ni, folio);
876 if (ret != E_NTFS_NONRESIDENT)
878 mapping_set_error(mapping, ret);
882 static int ntfs_writepages(struct address_space *mapping,
883 struct writeback_control *wbc)
885 struct inode *inode = mapping->host;
887 if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
890 if (is_resident(ntfs_i(inode)))
891 return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
893 return mpage_writepages(mapping, wbc, ntfs_get_block);
896 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
897 struct buffer_head *bh_result, int create)
899 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
900 bh_result, create, GET_BLOCK_WRITE_BEGIN);
903 int ntfs_write_begin(struct file *file, struct address_space *mapping,
904 loff_t pos, u32 len, struct page **pagep, void **fsdata)
907 struct inode *inode = mapping->host;
908 struct ntfs_inode *ni = ntfs_i(inode);
910 if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
914 if (is_resident(ni)) {
915 struct folio *folio = __filemap_get_folio(
916 mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN,
917 mapping_gfp_mask(mapping));
920 err = PTR_ERR(folio);
925 err = attr_data_read_resident(ni, folio);
929 *pagep = &folio->page;
935 if (err != E_NTFS_NONRESIDENT)
939 err = block_write_begin(mapping, pos, len, pagep,
940 ntfs_get_block_write_begin);
947 * ntfs_write_end - Address_space_operations::write_end.
949 int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
950 u32 len, u32 copied, struct page *page, void *fsdata)
952 struct folio *folio = page_folio(page);
953 struct inode *inode = mapping->host;
954 struct ntfs_inode *ni = ntfs_i(inode);
955 u64 valid = ni->i_valid;
959 if (is_resident(ni)) {
961 err = attr_data_write_resident(ni, folio);
964 struct buffer_head *head = folio_buffers(folio);
966 /* Clear any buffers in folio. */
968 struct buffer_head *bh = head;
971 clear_buffer_dirty(bh);
972 clear_buffer_mapped(bh);
973 set_buffer_uptodate(bh);
974 } while (head != (bh = bh->b_this_page));
976 folio_mark_uptodate(folio);
982 err = generic_write_end(file, mapping, pos, len, copied, page,
987 if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
988 inode_set_mtime_to_ts(inode,
989 inode_set_ctime_current(inode));
990 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
994 if (valid != ni->i_valid) {
995 /* ni->i_valid is changed in ntfs_get_block_vbo. */
999 if (pos + err > inode->i_size) {
1000 i_size_write(inode, pos + err);
1005 mark_inode_dirty(inode);
1011 int reset_log_file(struct inode *inode)
1015 u32 log_size = inode->i_size;
1016 struct address_space *mapping = inode->i_mapping;
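/*
* Rewrite the whole $LogFile content with 0xff bytes, page by page,
* going through write_begin/write_end so the pages are properly dirtied.
*/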
1023 len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
1025 err = block_write_begin(mapping, pos, len, &page,
1026 ntfs_get_block_write_begin);
1030 kaddr = kmap_atomic(page);
1031 memset(kaddr, -1, len);
1032 kunmap_atomic(kaddr);
1033 flush_dcache_page(page);
1035 err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
1040 if (pos >= log_size)
1042 balance_dirty_pages_ratelimited(mapping);
1045 mark_inode_dirty_sync(inode);
1050 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1052 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1055 int ntfs_sync_inode(struct inode *inode)
1057 return _ni_write_inode(inode, 1);
1061 * writeback_inode - Helper function for ntfs_flush_inodes().
1063 * This writes both the inode and the file data blocks, waiting
* for in-flight data blocks before the start of the call. It
* does not wait for any I/O started during the call.
1067 static int writeback_inode(struct inode *inode)
1069 int ret = sync_inode_metadata(inode, 0);
1072 ret = filemap_fdatawrite(inode->i_mapping);
* Write data and metadata corresponding to i1 and i2. The I/O is
* started but we do not wait for any of it to finish.
*
* filemap_flush() is used for the block device, so if there is a dirty
* page for a block already in flight, we will not wait and start the
* I/O over again.
1086 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
1092 ret = writeback_inode(i1);
1094 ret = writeback_inode(i2);
1096 ret = filemap_flush(sb->s_bdev_file->f_mapping);
1101 * Helper function to read file.
1103 int inode_read_data(struct inode *inode, void *data, size_t bytes)
1106 struct address_space *mapping = inode->i_mapping;
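/*
* Copy the contents page by page through the page cache:
* read_mapping_page() returns an uptodate page, which is then mapped
* and memcpy'd into @data.
*/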
1108 for (idx = 0; bytes; idx++) {
1109 size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1110 struct page *page = read_mapping_page(mapping, idx, NULL);
1114 return PTR_ERR(page);
1116 kaddr = kmap_atomic(page);
1117 memcpy(data, kaddr, op);
1118 kunmap_atomic(kaddr);
1123 data = Add2Ptr(data, PAGE_SIZE);
1129 * ntfs_reparse_bytes
1131 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
* for a Unicode string of @uni_len characters.
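*
* The buffer holds the PrintName (@uni_len UTF-16 characters) followed by
* the SubstituteName, which is the same name prefixed with the four
* characters "\??\", hence 2 * uni_len + 4 characters plus the fixed header.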
1134 static inline u32 ntfs_reparse_bytes(u32 uni_len)
1136 /* Header + unicode string + decorated unicode string. */
1137 return sizeof(short) * (2 * uni_len + 4) +
1138 offsetof(struct REPARSE_DATA_BUFFER,
1139 SymbolicLinkReparseBuffer.PathBuffer);
1142 static struct REPARSE_DATA_BUFFER *
1143 ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
1144 u32 size, u16 *nsize)
1147 struct REPARSE_DATA_BUFFER *rp;
1149 typeof(rp->SymbolicLinkReparseBuffer) *rs;
1151 rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1153 return ERR_PTR(-ENOMEM);
1155 rs = &rp->SymbolicLinkReparseBuffer;
1156 rp_name = rs->PathBuffer;
1158 /* Convert link name to UTF-16. */
1159 err = ntfs_nls_to_utf16(sbi, symname, size,
1160 (struct cpu_str *)(rp_name - 1), 2 * size,
1161 UTF16_LITTLE_ENDIAN);
/* err is the length of the symlink's Unicode name, in UTF-16 characters. */
1166 *nsize = ntfs_reparse_bytes(err);
1168 if (*nsize > sbi->reparse.max_size) {
1173 /* Translate Linux '/' into Windows '\'. */
1174 for (i = 0; i < err; i++) {
1175 if (rp_name[i] == cpu_to_le16('/'))
1176 rp_name[i] = cpu_to_le16('\\');
1179 rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1180 rp->ReparseDataLength =
1181 cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1182 SymbolicLinkReparseBuffer));
1184 /* PrintName + SubstituteName. */
1185 rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1186 rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1187 rs->PrintNameLength = rs->SubstituteNameOffset;
* TODO: Use a relative path if possible, to allow Windows to resolve the link.
* 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
1196 memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1198 /* Decorate SubstituteName. */
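/* The SubstituteName is the NT-namespace path "\??\<target>"; the bare PrintName precedes it in the buffer. */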
1200 rp_name[0] = cpu_to_le16('\\');
1201 rp_name[1] = cpu_to_le16('?');
1202 rp_name[2] = cpu_to_le16('?');
1203 rp_name[3] = cpu_to_le16('\\');
1208 return ERR_PTR(err);
1214 * Helper function for:
1219 * - ntfs_atomic_open
1221 * NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
1223 int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
1224 struct dentry *dentry, const struct cpu_str *uni,
1225 umode_t mode, dev_t dev, const char *symname, u32 size,
1226 struct ntfs_fnd *fnd)
1229 struct super_block *sb = dir->i_sb;
1230 struct ntfs_sb_info *sbi = sb->s_fs_info;
1231 const struct qstr *name = &dentry->d_name;
1233 struct ntfs_inode *dir_ni = ntfs_i(dir);
1234 struct ntfs_inode *ni = NULL;
1235 struct inode *inode = NULL;
1236 struct ATTRIB *attr;
1237 struct ATTR_STD_INFO5 *std5;
1238 struct ATTR_FILE_NAME *fname;
1239 struct MFT_REC *rec;
1240 u32 asize, dsize, sd_size;
1241 enum FILE_ATTRIBUTE fa;
1242 __le32 security_id = SECURITY_ID_INVALID;
1245 u16 t16, nsize = 0, aid = 0;
1246 struct INDEX_ROOT *root, *dir_root;
1247 struct NTFS_DE *e, *new_de = NULL;
1248 struct REPARSE_DATA_BUFFER *rp = NULL;
1249 bool rp_inserted = false;
/* The new file will be resident or non-resident. */
const bool new_file_resident = true;
1255 ni_lock_dir(dir_ni);
1257 dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1263 if (S_ISDIR(mode)) {
1264 /* Use parent's directory attributes. */
1265 fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1266 FILE_ATTRIBUTE_ARCHIVE;
* By default a child directory inherits its parent's attributes.
* The root directory is hidden + system.
* Make an exception for children of the root.
1272 if (dir->i_ino == MFT_REC_ROOT)
1273 fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1274 } else if (S_ISLNK(mode)) {
/* Ideally the link should be the same type (file/dir) as its target. */
1276 fa = FILE_ATTRIBUTE_REPARSE_POINT;
* Linux: there are dir/file/symlink and so on.
* NTFS: symlinks are "dir + reparse" or "file + reparse".
* It would be good to create:
* dir + reparse if 'symname' points to a directory, or
* file + reparse if 'symname' points to a file.
* Unfortunately kern_path hangs if symname contains 'dir'.
1291 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1292 * struct inode *target = d_inode(path.dentry);
1294 * if (S_ISDIR(target->i_mode))
1295 * fa |= FILE_ATTRIBUTE_DIRECTORY;
1296 * // if ( target->i_sb == sb ){
1297 * // use relative path?
1302 } else if (S_ISREG(mode)) {
1303 if (sbi->options->sparse) {
/* Sparse regular file, because of the 'sparse' mount option. */
1305 fa = FILE_ATTRIBUTE_SPARSE_FILE |
1306 FILE_ATTRIBUTE_ARCHIVE;
1307 } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
/* Compressed regular file, because the parent directory is compressed. */
1309 fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1311 /* Regular file, default attributes. */
1312 fa = FILE_ATTRIBUTE_ARCHIVE;
1315 fa = FILE_ATTRIBUTE_ARCHIVE;
/* If the "hide_dot_files" option is set, then set the hidden attribute for dot files. */
1319 if (sbi->options->hide_dot_files && name->name[0] == '.')
1320 fa |= FILE_ATTRIBUTE_HIDDEN;
1323 fa |= FILE_ATTRIBUTE_READONLY;
1325 /* Allocate PATH_MAX bytes. */
1326 new_de = __getname();
1332 if (unlikely(ntfs3_forced_shutdown(sb))) {
/* Mark rw ntfs as dirty. It will be cleared at umount. */
1338 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1340 /* Step 1: allocate and fill new mft record. */
1341 err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1345 ni = ntfs_new_inode(sbi, ino, S_ISDIR(mode) ? RECORD_FLAG_DIR : 0);
1351 inode = &ni->vfs_inode;
1352 inode_init_owner(idmap, inode, dir, mode);
1353 mode = inode->i_mode;
1355 ni->i_crtime = current_time(inode);
1358 rec->hard_links = cpu_to_le16(1);
1359 attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
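/*
* The new record is filled sequentially: $STANDARD_INFORMATION, $FILE_NAME,
* an optional $SECURITY_DESCRIPTOR, then $INDEX_ROOT ($I30) for directories
* or $DATA for files (plus $REPARSE_POINT for symlinks), and finally the
* end-of-attributes marker.
*/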
1361 /* Get default security id. */
1362 sd = s_default_security;
1363 sd_size = sizeof(s_default_security);
1365 if (is_ntfs3(sbi)) {
1366 security_id = dir_ni->std_security_id;
1367 if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1368 security_id = sbi->security.def_security_id;
1370 if (security_id == SECURITY_ID_INVALID &&
1371 !ntfs_insert_security(sbi, sd, sd_size,
1372 &security_id, NULL))
1373 sbi->security.def_security_id = security_id;
1377 /* Insert standard info. */
1378 std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1380 if (security_id == SECURITY_ID_INVALID) {
1381 dsize = sizeof(struct ATTR_STD_INFO);
1383 dsize = sizeof(struct ATTR_STD_INFO5);
1384 std5->security_id = security_id;
1385 ni->std_security_id = security_id;
1387 asize = SIZEOF_RESIDENT + dsize;
1389 attr->type = ATTR_STD;
1390 attr->size = cpu_to_le32(asize);
1391 attr->id = cpu_to_le16(aid++);
1392 attr->res.data_off = SIZEOF_RESIDENT_LE;
1393 attr->res.data_size = cpu_to_le32(dsize);
1395 std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1396 kernel2nt(&ni->i_crtime);
1398 std5->fa = ni->std_fa = fa;
1400 attr = Add2Ptr(attr, asize);
1402 /* Insert file name. */
1403 err = fill_name_de(sbi, new_de, name, uni);
1407 mi_get_ref(&ni->mi, &new_de->ref);
1409 fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1411 if (sbi->options->windows_names &&
1412 !valid_windows_name(sbi, (struct le_str *)&fname->name_len)) {
1417 mi_get_ref(&dir_ni->mi, &fname->home);
1418 fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1419 fname->dup.a_time = std5->cr_time;
1420 fname->dup.alloc_size = fname->dup.data_size = 0;
1421 fname->dup.fa = std5->fa;
1422 fname->dup.ea_size = fname->dup.reparse = 0;
1424 dsize = le16_to_cpu(new_de->key_size);
1425 asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1427 attr->type = ATTR_NAME;
1428 attr->size = cpu_to_le32(asize);
1429 attr->res.data_off = SIZEOF_RESIDENT_LE;
1430 attr->res.flags = RESIDENT_FLAG_INDEXED;
1431 attr->id = cpu_to_le16(aid++);
1432 attr->res.data_size = cpu_to_le32(dsize);
1433 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1435 attr = Add2Ptr(attr, asize);
1437 if (security_id == SECURITY_ID_INVALID) {
1438 /* Insert security attribute. */
1439 asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1441 attr->type = ATTR_SECURE;
1442 attr->size = cpu_to_le32(asize);
1443 attr->id = cpu_to_le16(aid++);
1444 attr->res.data_off = SIZEOF_RESIDENT_LE;
1445 attr->res.data_size = cpu_to_le32(sd_size);
1446 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1448 attr = Add2Ptr(attr, asize);
1451 attr->id = cpu_to_le16(aid++);
1452 if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1454 * Regular directory or symlink to directory.
1455 * Create root attribute.
1457 dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1458 asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1460 attr->type = ATTR_ROOT;
1461 attr->size = cpu_to_le32(asize);
1463 attr->name_len = ARRAY_SIZE(I30_NAME);
1464 attr->name_off = SIZEOF_RESIDENT_LE;
1465 attr->res.data_off =
1466 cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1467 attr->res.data_size = cpu_to_le32(dsize);
1468 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1471 root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1472 memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1473 root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR));
1474 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1475 sizeof(struct NTFS_DE));
1476 root->ihdr.total = root->ihdr.used;
1478 e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1479 e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1480 e->flags = NTFS_IE_LAST;
1481 } else if (S_ISLNK(mode)) {
1484 * Create empty resident data attribute.
1486 asize = SIZEOF_RESIDENT;
1488 /* Insert empty ATTR_DATA */
1489 attr->type = ATTR_DATA;
1490 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1491 attr->name_off = SIZEOF_RESIDENT_LE;
1492 attr->res.data_off = SIZEOF_RESIDENT_LE;
1493 } else if (!new_file_resident && S_ISREG(mode)) {
* Regular file. Create an empty non-resident data attribute.
1497 attr->type = ATTR_DATA;
1499 attr->nres.evcn = cpu_to_le64(-1ll);
1500 if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1501 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1502 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1503 attr->flags = ATTR_FLAG_SPARSED;
1504 asize = SIZEOF_NONRESIDENT_EX + 8;
1505 } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1506 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1507 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1508 attr->flags = ATTR_FLAG_COMPRESSED;
1509 attr->nres.c_unit = NTFS_LZNT_CUNIT;
1510 asize = SIZEOF_NONRESIDENT_EX + 8;
1512 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1513 attr->name_off = SIZEOF_NONRESIDENT_LE;
1514 asize = SIZEOF_NONRESIDENT + 8;
1516 attr->nres.run_off = attr->name_off;
1519 * Node. Create empty resident data attribute.
1521 attr->type = ATTR_DATA;
1522 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1523 attr->name_off = SIZEOF_RESIDENT_LE;
1524 if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1525 attr->flags = ATTR_FLAG_SPARSED;
1526 else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1527 attr->flags = ATTR_FLAG_COMPRESSED;
1528 attr->res.data_off = SIZEOF_RESIDENT_LE;
1529 asize = SIZEOF_RESIDENT;
1530 ni->ni_flags |= NI_FLAG_RESIDENT;
1533 if (S_ISDIR(mode)) {
1534 ni->ni_flags |= NI_FLAG_DIR;
1535 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1538 } else if (S_ISLNK(mode)) {
1539 rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1548 * Insert ATTR_REPARSE.
1550 attr = Add2Ptr(attr, asize);
1551 attr->type = ATTR_REPARSE;
1552 attr->id = cpu_to_le16(aid++);
1554 /* Resident or non resident? */
1555 asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1556 t16 = PtrOffset(rec, attr);
* The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
* It is a good idea to keep extended attributes resident.
1562 if (asize + t16 + 0x78 + 8 > sbi->record_size) {
1564 CLST clst = bytes_to_cluster(sbi, nsize);
/* Bytes available for the packed run list. */
1567 t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1570 attr->nres.evcn = cpu_to_le64(clst - 1);
1571 attr->name_off = SIZEOF_NONRESIDENT_LE;
1572 attr->nres.run_off = attr->name_off;
1573 attr->nres.data_size = cpu_to_le64(nsize);
1574 attr->nres.valid_size = attr->nres.data_size;
1575 attr->nres.alloc_size =
1576 cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1578 err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1579 clst, NULL, ALLOCATE_DEF,
1580 &alen, 0, NULL, NULL);
1584 err = run_pack(&ni->file.run, 0, clst,
1585 Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1595 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
/* Write the non-resident data. */
1597 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp,
1602 attr->res.data_off = SIZEOF_RESIDENT_LE;
1603 attr->res.data_size = cpu_to_le32(nsize);
1604 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1606 /* Size of symlink equals the length of input string. */
1607 inode->i_size = size;
1609 attr->size = cpu_to_le32(asize);
1611 err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1619 attr = Add2Ptr(attr, asize);
1620 attr->type = ATTR_END;
1622 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1623 rec->next_attr_id = cpu_to_le16(aid);
1625 inode->i_generation = le16_to_cpu(rec->seq);
1627 if (S_ISDIR(mode)) {
1628 inode->i_op = &ntfs_dir_inode_operations;
1629 inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
1630 &ntfs_legacy_dir_operations :
1631 &ntfs_dir_operations;
1632 } else if (S_ISLNK(mode)) {
1633 inode->i_op = &ntfs_link_inode_operations;
1634 inode->i_fop = NULL;
1635 inode->i_mapping->a_ops = &ntfs_aops;
1636 inode->i_size = size;
1637 inode_nohighmem(inode);
1638 } else if (S_ISREG(mode)) {
1639 inode->i_op = &ntfs_file_inode_operations;
1640 inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
1641 &ntfs_legacy_file_operations :
1642 &ntfs_file_operations;
1643 inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
1645 init_rwsem(&ni->file.run_lock);
1647 inode->i_op = &ntfs_special_inode_operations;
1648 init_special_inode(inode, mode, dev);
1651 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1652 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1653 err = ntfs_init_acl(idmap, inode, dir);
1659 inode->i_flags |= S_NOSEC;
* ntfs_init_acl and ntfs_save_wsl_perm update extended attributes.
* The packed size of the extended attributes is stored in the directory entry too.
* 'fname' here points inside new_de.
1667 err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size);
* Update ea_size in the file_name attribute too.
* Use ni_find_attr because the layout of the MFT record may have been
* changed in ntfs_init_acl and ntfs_save_wsl_perm.
1676 attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL);
1678 struct ATTR_FILE_NAME *fn;
1680 fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
1682 fn->dup.ea_size = fname->dup.ea_size;
/* We do not need to update the parent directory later. */
1686 ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
1688 /* Step 2: Add new name in index. */
1689 err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1694 * Call 'd_instantiate' after inode->i_op is set
1695 * but before finish_open.
1697 d_instantiate(dentry, inode);
/* Set original times. Inode times (i_ctime) may have been changed in ntfs_init_acl. */
1700 inode_set_atime_to_ts(inode, ni->i_crtime);
1701 inode_set_ctime_to_ts(inode, ni->i_crtime);
1702 inode_set_mtime_to_ts(inode, ni->i_crtime);
1703 inode_set_mtime_to_ts(dir, ni->i_crtime);
1704 inode_set_ctime_to_ts(dir, ni->i_crtime);
1706 mark_inode_dirty(dir);
1707 mark_inode_dirty(inode);
1713 attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL);
1714 if (attr && attr->non_res) {
1715 /* Delete ATTR_EA, if non-resident. */
1716 attr_set_size(ni, ATTR_EA, NULL, 0, NULL, 0, NULL, false, NULL);
1720 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1724 run_deallocate(sbi, &ni->file.run, false);
1727 clear_rec_inuse(rec);
1729 ni->mi.dirty = false;
1730 discard_new_inode(inode);
1732 ntfs_mark_rec_free(sbi, ino, false);
1743 unlock_new_inode(inode);
1748 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1751 struct ntfs_inode *ni = ntfs_i(inode);
1752 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1755 /* Allocate PATH_MAX bytes. */
1760 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1761 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1763 /* Construct 'de'. */
1764 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1768 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1777 * inode_operations::unlink
1778 * inode_operations::rmdir
1780 int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1783 struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1784 struct inode *inode = d_inode(dentry);
1785 struct ntfs_inode *ni = ntfs_i(inode);
1786 struct ntfs_inode *dir_ni = ntfs_i(dir);
1787 struct NTFS_DE *de, *de2 = NULL;
1790 if (ntfs_is_meta_file(sbi, ni->mi.rno))
1793 /* Allocate PATH_MAX bytes. */
1800 if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1805 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1810 err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
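/*
* On success update the directory and inode timestamps; on failure try to
* undo the partial name removal and mark the inode as bad if the undo
* also fails.
*/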
1814 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1815 mark_inode_dirty(dir);
1816 inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
1818 mark_inode_dirty(inode);
1819 } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1820 _ntfs_bad_inode(inode);
1822 if (ni_is_dirty(dir))
1823 mark_inode_dirty(dir);
1824 if (ni_is_dirty(inode))
1825 mark_inode_dirty(inode);
1834 void ntfs_evict_inode(struct inode *inode)
1836 truncate_inode_pages_final(&inode->i_data);
1838 invalidate_inode_buffers(inode);
1841 ni_clear(ntfs_i(inode));
1845 * ntfs_translate_junction
1847 * Translate a Windows junction target to the Linux equivalent.
1848 * On junctions, targets are always absolute (they include the drive
1849 * letter). We have no way of knowing if the target is for the current
1850 * mounted device or not so we just assume it is.
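*
* For example, assuming a link at <mnt>/a/b/link, the junction target
* 'C:/dir/sub' is rewritten as './../../dir/sub', which resolves to
* <mnt>/dir/sub relative to the directory containing the link.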
1852 static int ntfs_translate_junction(const struct super_block *sb,
1853 const struct dentry *link_de, char *target,
1854 int target_len, int target_max)
1856 int tl_len, err = target_len;
1857 char *link_path_buffer = NULL, *link_path;
1858 char *translated = NULL;
1862 link_path_buffer = kmalloc(PATH_MAX, GFP_NOFS);
1863 if (!link_path_buffer) {
/* Get the link path, relative to the mount point. */
1868 link_path = dentry_path_raw(link_de, link_path_buffer, PATH_MAX);
1869 if (IS_ERR(link_path)) {
1870 ntfs_err(sb, "Error getting link path");
1875 translated = kmalloc(PATH_MAX, GFP_NOFS);
/* Make the translated path relative to the mount point. */
1882 strcpy(translated, "./");
1883 ++link_path; /* Skip leading / */
1884 for (tl_len = sizeof("./") - 1; *link_path; ++link_path) {
1885 if (*link_path == '/') {
1886 if (PATH_MAX - tl_len < sizeof("../")) {
1888 "Link path %s has too many components",
1893 strcpy(translated + tl_len, "../");
1894 tl_len += sizeof("../") - 1;
1898 /* Skip drive letter */
1899 target_start = target;
1900 while (*target_start && *target_start != ':')
1903 if (!*target_start) {
1904 ntfs_err(sb, "Link target (%s) missing drive separator",
/* Skip the drive separator and the leading '/', if present. */
1911 target_start += 1 + (target_start[1] == '/');
1912 copy_len = target_len - (target_start - target);
1914 if (PATH_MAX - tl_len <= copy_len) {
1915 ntfs_err(sb, "Link target %s too large for buffer (%d <= %d)",
1916 target_start, PATH_MAX - tl_len, copy_len);
/* The translated path has a trailing '/' and target_start does not. */
1922 strcpy(translated + tl_len, target_start);
1924 if (target_max <= tl_len) {
1925 ntfs_err(sb, "Target path %s too large for buffer (%d <= %d)",
1926 translated, target_max, tl_len);
1930 strcpy(target, translated);
1934 kfree(link_path_buffer);
1939 static noinline int ntfs_readlink_hlp(const struct dentry *link_de,
1940 struct inode *inode, char *buffer,
1943 int i, err = -EINVAL;
1944 struct ntfs_inode *ni = ntfs_i(inode);
1945 struct super_block *sb = inode->i_sb;
1946 struct ntfs_sb_info *sbi = sb->s_fs_info;
1949 void *to_free = NULL;
1950 struct REPARSE_DATA_BUFFER *rp;
1951 const __le16 *uname;
1952 struct ATTRIB *attr;
1954 /* Reparse data present. Try to parse it. */
1955 static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1956 static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1960 attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1964 if (!attr->non_res) {
1965 rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1968 size = le32_to_cpu(attr->res.data_size);
1970 size = le64_to_cpu(attr->nres.data_size);
1974 if (size > sbi->reparse.max_size || size <= sizeof(u32))
1978 rp = kmalloc(size, GFP_NOFS);
/* Read into a temporary buffer. */
1985 err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1990 /* Microsoft Tag. */
1991 switch (rp->ReparseTag) {
1992 case IO_REPARSE_TAG_MOUNT_POINT:
1993 /* Mount points and junctions. */
1994 /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1995 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1996 MountPointReparseBuffer.PathBuffer))
1999 offsetof(struct REPARSE_DATA_BUFFER,
2000 MountPointReparseBuffer.PathBuffer) +
2001 le16_to_cpu(rp->MountPointReparseBuffer
2003 ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
2006 case IO_REPARSE_TAG_SYMLINK:
2007 /* FolderSymbolicLink */
2008 /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
2009 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
2010 SymbolicLinkReparseBuffer.PathBuffer))
2013 rp, offsetof(struct REPARSE_DATA_BUFFER,
2014 SymbolicLinkReparseBuffer.PathBuffer) +
2015 le16_to_cpu(rp->SymbolicLinkReparseBuffer
2018 rp->SymbolicLinkReparseBuffer.PrintNameLength);
2021 case IO_REPARSE_TAG_CLOUD:
2022 case IO_REPARSE_TAG_CLOUD_1:
2023 case IO_REPARSE_TAG_CLOUD_2:
2024 case IO_REPARSE_TAG_CLOUD_3:
2025 case IO_REPARSE_TAG_CLOUD_4:
2026 case IO_REPARSE_TAG_CLOUD_5:
2027 case IO_REPARSE_TAG_CLOUD_6:
2028 case IO_REPARSE_TAG_CLOUD_7:
2029 case IO_REPARSE_TAG_CLOUD_8:
2030 case IO_REPARSE_TAG_CLOUD_9:
2031 case IO_REPARSE_TAG_CLOUD_A:
2032 case IO_REPARSE_TAG_CLOUD_B:
2033 case IO_REPARSE_TAG_CLOUD_C:
2034 case IO_REPARSE_TAG_CLOUD_D:
2035 case IO_REPARSE_TAG_CLOUD_E:
2036 case IO_REPARSE_TAG_CLOUD_F:
2037 err = sizeof("OneDrive") - 1;
2040 memcpy(buffer, "OneDrive", err);
2044 if (IsReparseTagMicrosoft(rp->ReparseTag)) {
2045 /* Unknown Microsoft Tag. */
2048 if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
2049 size <= sizeof(struct REPARSE_POINT)) {
2054 uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
2055 ulen = le16_to_cpu(rp->ReparseDataLength) -
2056 sizeof(struct REPARSE_POINT);
/* Convert ulen from bytes to UTF-16 characters. */
/* Check that the name lies within the buffer. */
2063 if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
2066 /* If name is already zero terminated then truncate it now. */
2067 if (!uname[ulen - 1])
2070 err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
2075 /* Translate Windows '\' into Linux '/'. */
2076 for (i = 0; i < err; i++) {
2077 if (buffer[i] == '\\')
/* Always NUL-terminate the output. */
2084 /* If this is a junction, translate the link target. */
2085 if (rp->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
2086 err = ntfs_translate_junction(sb, link_de, buffer, err, buflen);
2093 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
2094 struct delayed_call *done)
2100 return ERR_PTR(-ECHILD);
2102 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
2104 return ERR_PTR(-ENOMEM);
2106 err = ntfs_readlink_hlp(de, inode, ret, PAGE_SIZE);
2109 return ERR_PTR(err);
2112 set_delayed_call(done, kfree_link, ret);
2118 const struct inode_operations ntfs_link_inode_operations = {
2119 .get_link = ntfs_get_link,
2120 .setattr = ntfs3_setattr,
2121 .listxattr = ntfs_listxattr,
2124 const struct address_space_operations ntfs_aops = {
2125 .read_folio = ntfs_read_folio,
2126 .readahead = ntfs_readahead,
2127 .writepages = ntfs_writepages,
2128 .write_begin = ntfs_write_begin,
2129 .write_end = ntfs_write_end,
2130 .direct_IO = ntfs_direct_IO,
2132 .dirty_folio = block_dirty_folio,
2133 .migrate_folio = buffer_migrate_folio,
2134 .invalidate_folio = block_invalidate_folio,
2137 const struct address_space_operations ntfs_aops_cmpr = {
2138 .read_folio = ntfs_read_folio,
2139 .readahead = ntfs_readahead,
2140 .dirty_folio = block_dirty_folio,