// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/log2.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is a contiguous memory area. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};

/*
 * run_lookup - Lookup the index of the MCB entry that contains @vcn.
 *
 * On success it returns true and sets @index to the index of the
 * entry that was found. If the entry is missing from the list,
 * @index is set to the insertion position for the entry in question.
 */
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check the boundary cases first, since they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
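
/*
 * Worked example of run_lookup() (illustrative only, hypothetical
 * values): with runs[] = { {.vcn = 0, .len = 4}, {.vcn = 8, .len = 2} }:
 *
 *	run_lookup(run, 9, &i);	// true,  i == 1: vcn 9 lies in runs[1]
 *	run_lookup(run, 5, &i);	// false, i == 1: insertion point for
 *				// the hole between the two runs
 */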

/*
 * run_consolidate - Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if the runs are not adjacent. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at @index overlaps with the next one
		 * then either adjust its start position
		 * or (if it matches completely) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * Skip the pair if the sparse mode of the current
		 * and the next run does not match.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Stop if the first volume block (lcn) of the next run
		 * does not follow the last volume block of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * Next and current runs are siblings.
		 * Eat/join them.
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}
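
/*
 * Example of one run_consolidate() step (illustrative, hypothetical
 * values): the adjacent pair
 *
 *	{.vcn = 0, .len = 2, .lcn = 10}, {.vcn = 2, .len = 3, .lcn = 12}
 *
 * has dl == 0, matching sparse modes and n->lcn == r->lcn + r->len,
 * so the runs are joined into {.vcn = 0, .len = 5, .lcn = 10} and the
 * second entry is removed from the array.
 */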

/*
 * run_is_mapped_full
 *
 * Return: True if the range [svcn, evcn] is fully mapped.
 */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}
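
/*
 * Example (illustrative, hypothetical values): with
 * runs[] = { {.vcn = 0, .len = 4}, {.vcn = 8, .len = 2} }
 * the range [0, 3] is fully mapped, while [0, 5] is not:
 * the walk stops because the entry after runs[0] does not
 * start at the expected next_vcn of 4.
 */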

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}
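
/*
 * Usage sketch (illustrative; the surrounding caller code is
 * hypothetical): translate a file cluster (VCN) into a volume
 * cluster (LCN):
 *
 *	CLST lcn, len;
 *
 *	if (run_lookup_entry(run, vcn, &lcn, &len, NULL)) {
 *		// vcn maps to lcn, and the next 'len' clusters are
 *		// contiguous; lcn == SPARSE_LCN means a hole.
 *	}
 */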

/*
 * run_truncate_head - Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate - Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If we hit a range then we have to truncate it.
	 * If the range being truncated becomes empty,
	 * it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the first position
	 * that should be thrown away (including 'index' itself).
	 * The rest is simple - just set the new limit.
	 */
	run->count = index;

	/* Do not reallocate the array 'runs'. Only free it if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate_around - Trim head and tail if necessary.
 */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}

/*
 * run_add_entry
 *
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 *
 * Return: false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute a bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * A shortcut here is the case where the range was not found,
	 * but the one being added continues the previous run.
	 * In this case we can directly use the existing range
	 * as our start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first check whether the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check the allocated space.
		 * If it is not enough to hold one more entry
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use a power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated then we
		 * have to split the location we just matched and
		 * insert the current one.
		 * In the common case this requires the tail to be
		 * reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN
						   ? SPARSE_LCN
						   : (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one we are going to add. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits then we are done.
		 * Otherwise extend the found one and fall back
		 * to the range-join code.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from the insertion point.
	 * It is possible that no insertion was needed, in the case
	 * where the start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * we have to add an extra range as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
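
/*
 * Example of the split path above (illustrative, hypothetical values):
 * adding {vcn 2, lcn 100, len 2} into the run {.vcn = 0, .len = 6,
 * .lcn = 10} (the new lcn does not continue the old one) produces:
 *
 *	{.vcn = 0, .len = 2, .lcn = 10}		// head, trimmed in place
 *	{.vcn = 2, .len = 2, .lcn = 100}	// the newly added run
 *	{.vcn = 4, .len = 2, .lcn = 14}		// tail, re-added recursively
 */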

/*
 * run_collapse_range
 *
 * Helper for attr_collapse_range(),
 * which is a helper for fallocate(collapse_range).
 */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse the tail of the run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split it. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}
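
/*
 * Example (illustrative, hypothetical values): collapsing the range
 * [vcn 2, vcn 4) out of the single run {.vcn = 0, .len = 6, .lcn = 10}
 * hits the "middle of a normal run" case: a sparse hole is punched
 * first and the function recurses on itself:
 *
 *	{0, 2, 10}, {2, 2, SPARSE}, {4, 2, 14}	// after run_add_entry()
 *	{0, 2, 10}, {2, 2, 14}			// recursion eats the hole
 *						// and shifts the tail down
 */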

/*
 * run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is a helper for fallocate(insert_range).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* Split the fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}
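
/*
 * Example (illustrative, hypothetical values): inserting a 2-cluster
 * hole at vcn 2 into the single run {.vcn = 0, .len = 6, .lcn = 10}
 * splits it at vcn 2, shifts the tail up by 'len' and re-adds it,
 * then adds a sparse run covering the inserted range:
 *
 *	{0, 2, 10}, {2, 2, SPARSE}, {4, 4, 12}
 */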

/*
 * run_get_entry - Return the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}

/*
 * run_packed_size - Calculate the size of packed int64.
 */
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}
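
/*
 * Worked examples of run_packed_size() (results are the same for both
 * endian variants):
 *
 *	run_packed_size(0x18)   == 1	// fits in one byte
 *	run_packed_size(0x80)   == 2	// needs a leading 0x00 byte, else
 *					// it would read back as negative
 *	run_packed_size(0x4444) == 2
 *	run_packed_size(-2)     == 1	// 0xFE, the sign bit is already set
 */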

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); Is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); Is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif

/*
 * run_pack - Pack runs into a buffer.
 *
 * packed_vcns - How many clusters (vcns) we have packed.
 * packed_size - How many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	s64 dlcn;
	int offset_size, size_size, tmp;

	*packed_vcns = 0;

	if (!len)
		goto out;

	/* Check that all required entries [svcn, evcn1) are available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	r_end = run->runs + run->count;
	r = run->runs + i;

	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat the cycle above and pack the runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - How many bytes the packed dlcn takes. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack the run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of the run. */
			run_pack_s64(run_buf, size_size, len);

			run_buf += size_size;
			/* Pack the offset from the previous LCN. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:
	/* Store the terminating zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;
}
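
/*
 * Byte-level example of the format produced above (illustrative,
 * hypothetical values): a run of 0x18 clusters at lcn 0x5678, when the
 * previous lcn was 0x1234, packs as:
 *
 *	21 18 44 44
 *
 * 0x21 - header: size_size == 1, offset_size == 2;
 * 0x18 - the run length (1 byte, little endian);
 * 0x44 0x44 - dlcn == 0x5678 - 0x1234 == 0x4444 (2 bytes, little endian).
 * A sparse run is packed with offset_size == 0 and stores no dlcn bytes.
 */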

/*
 * run_unpack - Unpack packed runs from @run_buf.
 *
 * Return: A negative error code, or the number of bytes actually used.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       u32 run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - How many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - How many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: Runs are stored in little-endian order;
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* Initial value of dlcn is -1 or 0. */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check the boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* LCN range is out of the volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr(fslog.c) to check the run. */
		else if (run == RUN_DEALLOCATE) {
			/*
			 * Called from ni_delete_all to free clusters
			 * without storing them in the run.
			 */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Unexpected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
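
/*
 * Decoding example (illustrative; it reverses the run_pack() example
 * above): the bytes "21 18 44 44 00" decode as one run plus the
 * terminating zero header:
 *
 *	header 0x21 -> size_size == 1, offset_size == 2
 *	len  == 0x18
 *	dlcn == 0x4444, so lcn == prev_lcn + 0x4444
 */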

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex - Unpack packed runs from "run_buf".
 *
 * Checks that the unpacked runs are marked as used in the bitmap.
 *
 * Return: A negative error code, or the number of bytes actually used.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  u32 run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index;
	bool ok;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* Check that all clusters [lcn, lcn+len) are marked as used. */
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like the volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (down_write_trylock(&wnd->rw_lock)) {
			/* Mark all zero bits as used in range [lcn, lcn+len). */
			CLST i, lcn_f = 0, len_f = 0;

			err = 0;
			for (i = 0; i < len; i++) {
				if (wnd_is_free(wnd, lcn + i, 1)) {
					if (!len_f)
						lcn_f = lcn + i;
					len_f += 1;
				} else if (len_f) {
					err = wnd_set_used(wnd, lcn_f, len_f);
					len_f = 0;
					if (err)
						break;
				}
			}

			if (len_f)
				err = wnd_set_used(wnd, lcn_f, len_f);

			up_write(&wnd->rw_lock);
			if (err)
				return err;
		}
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Return the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of run.
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}