// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to tune
 * the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

/* 16M */
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
/* 16G */
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
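/*
 * Illustrative sketch (compiled out, not part of the original driver):
 * a worked example of the clump rounding above, with numbers picked
 * purely for demonstration.
 */
#if 0
static void example_get_pre_allocated(void)
{
	/* 17 MiB + 3 bytes lies between NTFS_CLUMP_MIN and NTFS_CLUMP_MAX. */
	u64 size = (17ull << 20) + 3;

	/*
	 * size >> 24 == 1 and __ffs(1) == 0, so align_shift == 15 and
	 * @size is rounded up to the next 32 KiB boundary: 17858560.
	 */
	u64 pre = get_pre_allocated(size);
}
#endif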
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load the entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		/* If there is no free space, retry without preallocation. */
		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			err = ntfs_look_for_free_space(sbi, lcn, len, &lcn,
						       &flen, opt);
		}

		if (err)
			goto out;

		if (new_lcn && vcn == vcn0) {
			/* Return the first fragment. */
			*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space'. */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space'. */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
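/*
 * Illustrative sketch (compiled out): how a caller might use
 * attr_allocate_clusters() to allocate and zero @len clusters at @vcn.
 * The wrapper below and its locking context are assumptions, not code
 * from this file.
 */
#if 0
static int example_alloc(struct ntfs_sb_info *sbi, struct runs_tree *run,
			 CLST vcn, CLST len)
{
	CLST alen, hint = 0;

	/* Use the previous LCN as an allocation hint, if one is mapped. */
	if (vcn)
		run_lookup_entry(run, vcn - 1, &hint, NULL, NULL);

	return attr_allocate_clusters(sbi, run, vcn, hint + 1, len,
				      NULL, ALLOCATE_ZERO, &alen, 0,
				      NULL, NULL);
}
#endif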
/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	/* len - how many clusters are required to store 'rsize' bytes. */
	if (is_attr_compressed(attr)) {
		u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT;

		len = ((rsize + (1u << shift) - 1) >> shift) << NTFS_LZNT_CUNIT;
	} else {
		len = bytes_to_cluster(sbi, rsize);
	}
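	/*
	 * Worked example (illustrative, not from the original source):
	 * with 4 KiB clusters, cluster_bits == 12 and NTFS_LZNT_CUNIT == 4,
	 * so one compression unit covers 64 KiB. For rsize == 70000 bytes
	 * the compressed branch rounds up to two units: len == 2 << 4 == 32
	 * clusters, whereas the uncompressed branch would need only
	 * bytes_to_cluster(sbi, 70000) == 18 clusters.
	 */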
	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			struct address_space *mapping = ni->vfs_inode.i_mapping;
			struct folio *folio;

			folio = __filemap_get_folio(
				mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				mapping_gfp_mask(mapping));
			if (IS_ERR(folio)) {
				err = PTR_ERR(folio);
				goto out2;
			}
			folio_fill_tail(folio, 0, data, rsize);
			folio_mark_uptodate(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
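/*
 * Worked example (illustrative): growing a resident attribute from
 * rsize == 10 to new_size == 25 gives dsize == ALIGN(25, 8) -
 * ALIGN(10, 8) == 32 - 16 == 16, so the attributes that follow are
 * moved 16 bytes toward higher offsets and the new bytes are zeroed.
 */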
/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
		      !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;
again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may have changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}
	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;
	if (new_size > old_size) {
		CLST to_allocate;
		u64 free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In the simple case we have to:
		 *  - allocate space (vcn, lcn, len)
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		vcn = old_alen;
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from the MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
						    sbi, get_pre_allocated(
								 new_size)) -
					    new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, old_alen - 1, &lcn, NULL,
					      NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
		}

		free = wnd_zeroes(&sbi->used.bitmap);
		if (to_allocate > free) {
			err = -ENOSPC;
			goto out;
		}

		if (pre_alloc && to_allocate + pre_alloc > free)
			pre_alloc = 0;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL, NULL);
			if (err)
				goto out;
		}
		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;
		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again_1;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error, layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again_1;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		/* Keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */
		if (ni->mi.rno != MFT_REC_MFT)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in a consistent state.
		 * Save this point to restore to if the next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}
	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In the simple case we have to:
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 *  - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry (vcn==0)
			 * and it is not the first in the entries array
			 * (the list entry for the std attribute is always first),
			 * so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;
undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
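/*
 * Illustrative sketch (compiled out): a typical truncate-style call of
 * attr_set_size() for the unnamed data attribute. The wrapper and its
 * locking context are assumptions, not code from this file.
 */
#if 0
static int example_truncate(struct ntfs_inode *ni, u64 new_size)
{
	u64 new_valid = min(ni->i_valid, new_size);

	/* Shrink or extend $DATA; do not keep the preallocated tail. */
	return attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			     new_size, &new_valid, false, NULL);
}
#endif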
/*
 * attr_data_get_block - Returns 'lcn' and 'len' for a given 'vcn'.
 *
 * @new == NULL means just get the current mapping for 'vcn'.
 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
 * @zero - zero out newly allocated clusters.
 *
 * NOTE:
 * - @new != NULL is passed only for sparse or compressed attributes.
 * - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned int fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len && (*lcn != SPARSE_LCN || !new))
		return 0; /* Fast normal way without allocation. */

	/* No cluster in cache or we need to allocate a cluster in a hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;
	down_write(&ni->file.run_lock);

	/* Repeat the code above (under write lock). */
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			goto out; /* Normal way without allocation. */
		if (clen > *len)
			clen = *len;
	}

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load the actual runs into the cache. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	/* Check for compressed frame. */
	err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
				       &hint, run);
	if (err)
		goto out;

	if (hint) {
		/* If the frame is compressed - don't touch it. */
		*lcn = COMPRESSED_LCN;
		/* Length to the end of frame. */
		*len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
		goto out;
	}

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */
		} else if (!new) {
			/*
			 * Here we may return -ENOENT.
			 * In any case the caller gets zero length.
			 */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below is only for sparse or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/* Allocate frame aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed files.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files.
	 */
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
	}

	/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
	if (vcn < svcn || evcn1 <= vcn) {
		struct ATTRIB *attr2;
		/* Load runs for the truncated vcn. */
		attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
				     0, &vcn, &mi);
		if (!attr2) {
			err = -EINVAL;
			goto out;
		}
		evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
		err = attr_load_runs(attr2, ni, run, NULL);
		if (err)
			goto out;
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;
	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zero out new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check case when vcn0 + len overlaps newly allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}
repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */
		goto ok;
	}

	/* Add new segment [next_svcn : evcn1). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto undo1;
		/* Layout of records is changed. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
				      0, NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr = attr_b;
		le = le_b;
		mi = mi_b;
		goto repack;
	}

	svcn = evcn1;

	/*
	 * The code below may require an additional cluster (to extend the
	 * attribute list) and/or one MFT record.
	 * It is too complex to undo operations if -ENOSPC occurs deep inside
	 * 'ni_insert_nonresident'.
	 * Return -ENOSPC in advance if there is no free cluster and no free
	 * MFT record.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update an existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn]. */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			err = -EINVAL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}
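/*
 * Illustrative sketch (compiled out): mapping one cluster for a write,
 * allocating and zeroing it if @vcn falls into a hole. The caller shown
 * here is an assumption, not part of this file.
 */
#if 0
static int example_map_for_write(struct ntfs_inode *ni, CLST vcn)
{
	CLST lcn, len;
	bool new = false;

	/* new != NULL: allocate a real cluster if @vcn maps to a hole. */
	int err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true);

	if (!err && new)
		pr_debug("ntfs3: allocated lcn %x for vcn %x\n", lcn, vcn);
	return err;
}
#endif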
int attr_data_read_resident(struct ntfs_inode *ni, struct folio *folio)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;
	size_t len;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = folio->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo > data_size)
		len = 0;
	else
		len = min(data_size - vbo, folio_size(folio));

	folio_fill_tail(folio, 0, resident_data(attr) + vbo, len);
	folio_mark_uptodate(folio);

	return 0;
}
int attr_data_write_resident(struct ntfs_inode *ni, struct folio *folio)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = folio->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		size_t len = min(data_size - vbo, folio_size(folio));

		memcpy_from_folio(data + vbo, folio, 0, len);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}
/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}
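/*
 * Illustrative sketch (compiled out): making sure the run table covers
 * a byte range before doing I/O on it. The wrapper is an assumption.
 */
#if 0
static int example_prepare_io(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	/* Map every cluster of [vbo, vbo + bytes) into ni->file.run. */
	return attr_load_runs_range(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    vbo, vbo + bytes);
}
#endif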
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read the header of an Xpress/LZX file to get info about a frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct folio *folio;
	int i, err = 0;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with an array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with an array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}
	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == the offset where the compressed frame starts.
	 * Read 4/8 bytes at [vbo] == the offset where the compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}
	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	folio = ni->file.offs_folio;
	if (!folio) {
		folio = folio_alloc(GFP_KERNEL, 0);
		if (!folio) {
			err = -ENOMEM;
			goto out;
		}
		folio->index = -1;
		ni->file.offs_folio = folio;
	}
	folio_lock(folio);
	addr = folio_address(folio);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != folio->index) {
			struct page *page = &folio->page;
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err)
				goto out1;
			folio->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	folio_unlock(folio);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
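/*
 * Worked example (illustrative): with 32-bit offsets, frame 2 reads
 * off[0] at byte 4 and off[1] at byte 8 of the offset array. If they
 * hold 100 and 250, the compressed frame occupies bytes
 * [array_end + 100, array_end + 250), i.e. *ondisk_size == 150.
 */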
/*
 * attr_is_frame_compressed - Used to detect a compressed frame.
 *
 * attr - base (primary) attribute segment.
 * run - run to use, usually == &ni->file.run.
 * Only base segments contain a valid 'attr->nres.c_unit'.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data, struct runs_tree *run)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check the next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters + sparse clusters are
				 * not enough for the frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
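/*
 * Illustrative sketch (compiled out): deciding how to read one frame.
 * The surrounding reader is an assumption, not code from this file.
 */
#if 0
static int example_check_frame(struct ntfs_inode *ni, struct ATTRIB *attr_b,
			       CLST frame)
{
	CLST clst_data;
	int err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data,
					   &ni->file.run);

	if (err)
		return err;

	/*
	 * clst_data == 0: sparse frame; 0 < clst_data < clusters-per-frame:
	 * compressed frame; clst_data == clusters-per-frame: raw data.
	 */
	return 0;
}
#endif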
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
		total_size += (u64)len << sbi->cluster_bits;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */
		goto ok;
	}

	/* Add new segment [next_svcn : evcn1). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto out;
		/* Layout of records is changed. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
				      0, NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr = attr_b;
		le = le_b;
		mi = mi_b;
		goto repack;
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn]. */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				err = -EINVAL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}

	return err;
}
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only cluster-aligned ranges may be collapsed. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCNs down by 'len'. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN of this segment. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	i_size_write(&ni->vfs_inode, data_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
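/*
 * Worked example (illustrative): on a volume with 4 KiB clusters a
 * normal file uses mask == 0xfff, so vbo and bytes must be multiples of
 * 4 KiB. For a compressed file with c_unit == 4 the mask is
 * (4096 << 4) - 1 == 0xffff, i.e. 64 KiB frame alignment is required.
 */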
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists that the range is already aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;

		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if the required range is already a hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of run to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make a hole range (sparse) [vcn1, vcn1 + zero). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update run in attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records may be changed. */
			attr_b = NULL;
		}

		/* Real deallocate. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail because we restore the original.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}
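/*
 * Illustrative sketch (compiled out): how a fallocate-style caller
 * might react to E_NTFS_NOTALIGNED by zeroing the unaligned range
 * itself. The helper ntfs_zero_range() named here is an assumption
 * used only for illustration.
 */
#if 0
static int example_punch(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	u32 frame_size;
	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);

	if (err == E_NTFS_NOTALIGNED) {
		/* Zero the range in the page cache instead of punching it. */
		err = ntfs_zero_range(&ni->vfs_inode, vbo, vbo + bytes);
	}
	return err;
}
#endif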
/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo >= data_size) {
		/*
		 * Inserting a range after the file size is not allowed.
		 * If the offset is equal to or greater than the end of
		 * file, an error is returned. For such operations (i.e.
		 * inserting a hole at the end of file), ftruncate(2)
		 * should be used.
		 */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges may be inserted. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size.
	 * Check alloc_size against the maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EOPNOTSUPP;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			memmove(data + bytes, data, bytes);
			memset(data, 0, bytes);
			goto done;
		}

		/* Resident file becomes nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack in current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;
		}
	}

done:
	/*
	 * Update primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may not be equal to valid_size (temporary). */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	goto out;
}
/*
 * attr_force_nonresident
 *
 * Convert the default data attribute into non resident form.
 */
int attr_force_nonresident(struct ntfs_inode *ni)
{
	int err;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le = NULL;
	struct mft_inode *mi;

	attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr) {
		ntfs_bad_inode(&ni->vfs_inode, "no data attribute");
		return -ENOENT;
	}

	if (attr->non_res) {
		/* Already non resident. */
		return 0;
	}

	down_write(&ni->file.run_lock);
	err = attr_make_nonresident(ni, attr, le, mi,
				    le32_to_cpu(attr->res.data_size),
				    &ni->file.run, &attr, NULL);
	up_write(&ni->file.run_lock);

	return err;
}
/*
 * Change the compression of the data attribute.
 */
int attr_set_compress(struct ntfs_inode *ni, bool compr)
{
	struct ATTRIB *attr;
	struct mft_inode *mi;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -ENOENT;

	if (is_attr_compressed(attr) == !!compr) {
		/* Already in the required compressed state. */
		return 0;
	}

	if (attr->non_res) {
		u16 run_off;
		u32 run_size;
		char *run;

		if (attr->nres.data_size) {
			/*
			 * There are rare cases when it is possible to change
			 * the compress state without big changes.
			 * TODO: Process these cases.
			 */
			return -EOPNOTSUPP;
		}

		run_off = le16_to_cpu(attr->nres.run_off);
		run_size = le32_to_cpu(attr->size) - run_off;
		run = Add2Ptr(attr, run_off);

		if (!compr) {
			/* Remove field 'attr->nres.total_size'. */
			memmove(run - 8, run, run_size);
			run_off -= 8;
		}

		if (!mi_resize_attr(mi, attr, compr ? +8 : -8)) {
			/*
			 * Ignore the rare case when there are no 8 free bytes
			 * in the record with the attribute.
			 * TODO: split attribute.
			 */
			return -EOPNOTSUPP;
		}

		if (compr) {
			/* Make a gap for 'attr->nres.total_size'. */
			memmove(run + 8, run, run_size);
			run_off += 8;
			attr->nres.total_size = attr->nres.alloc_size;
		}
		attr->nres.run_off = cpu_to_le16(run_off);
	}

	/* Update data attribute flags. */
	if (compr) {
		attr->flags |= ATTR_FLAG_COMPRESSED;
		attr->nres.c_unit = NTFS_LZNT_CUNIT;
	} else {
		attr->flags &= ~ATTR_FLAG_COMPRESSED;
		attr->nres.c_unit = 0;
	}
	mi->dirty = true;

	return 0;
}
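/*
 * Worked example (illustrative): the nonresident attribute header ends
 * with the 8-byte 'total_size' field only for sparse/compressed
 * attributes, which is why the conversion above grows or shrinks the
 * attribute by exactly 8 bytes and shifts the packed run data to make
 * or close the gap.
 */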