// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "repair.h"

#include <linux/min_heap.h>
#include <linux/minmax.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"

#include "block-map.h"
#include "completion.h"
#include "constants.h"
#include "encodings.h"
#include "io-submitter.h"
#include "recovery-journal.h"
#include "slab-depot.h"
#include "types.h"
#include "vdo.h"
#include "wait-queue.h"
/*
 * An explicitly numbered block mapping. Numbering the mappings allows them to be sorted by
 * logical block number during repair while still preserving the relative order of journal
 * entries with the same logical block number.
 */
struct numbered_block_mapping {
	struct block_map_slot block_map_slot;
	struct block_map_entry block_map_entry;
	/* A serial number to use during replay */
	u32 number;
} __packed;
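/*
 * Illustrative sketch (not part of the driver): two mappings for the same slot keep their
 * journal order after sorting, because 'number' is the final tie-breaker:
 *
 *	struct numbered_block_mapping a = { .number = 4 };	// journaled first
 *	struct numbered_block_mapping b = { .number = 9 };	// journaled later
 *
 * Sorting by (pbn, slot, number) replays 'a' before 'b', so the later journal entry is the
 * one left in the block map page.
 */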
/*
 * The absolute position of an entry in the recovery journal, including the sector number and
 * the entry number within the sector.
 */
struct recovery_point {
	/* Block sequence number */
	sequence_number_t sequence_number;
	/* Sector number */
	u8 sector_count;
	/* Entry number */
	journal_entry_count_t entry_count;
	/* Whether or not the increment portion of the current entry has been applied */
	bool increment_applied;
};
struct repair_completion {
	/* The completion header */
	struct vdo_completion completion;

	/* A buffer to hold the data read off disk */
	char *journal_data;

	/* For loading the journal */
	data_vio_count_t vio_count;
	data_vio_count_t vios_complete;
	struct vio *vios;

	/* The number of entries to be applied to the block map */
	size_t block_map_entry_count;
	/* The sequence number of the first valid block for block map recovery */
	sequence_number_t block_map_head;
	/* The sequence number of the first valid block for slab journal replay */
	sequence_number_t slab_journal_head;
	/* The sequence number of the last valid block of the journal (if known) */
	sequence_number_t tail;
	/*
	 * The highest sequence number of the journal. During recovery (vs. read-only rebuild),
	 * this is not the same as the tail, since the tail ignores blocks after the first hole.
	 */
	sequence_number_t highest_tail;

	/* The number of logical blocks currently known to be in use */
	block_count_t logical_blocks_used;
	/* The number of block map data blocks known to be allocated */
	block_count_t block_map_data_blocks;

	/* These fields are for playing the journal into the block map */
	/* The entry data for the block map recovery */
	struct numbered_block_mapping *entries;
	/* The number of entries in the entry array */
	size_t entry_count;
	/* The number of pending (non-ready) requests */
	page_count_t outstanding;
	/* The number of page completions */
	page_count_t page_count;
	/* Whether page launches are still in progress */
	bool launching;
	/*
	 * A heap wrapping journal_entries. It re-orders and sorts journal entries in ascending
	 * LBN order, then original journal order. This permits efficient iteration over the
	 * journal entries in order.
	 */
	struct min_heap replay_heap;
	/* Fields tracking progress through the journal entries. */
	struct numbered_block_mapping *current_entry;
	struct numbered_block_mapping *current_unfetched_entry;
	/* The PBN of the currently requested page */
	physical_block_number_t pbn;

	/* These fields are only used during recovery. */
	/* A location just beyond the last valid entry of the journal */
	struct recovery_point tail_recovery_point;
	/* The location of the next recovery journal entry to apply */
	struct recovery_point next_recovery_point;
	/* The journal point to give to the next synthesized decref */
	struct journal_point next_journal_point;
	/* The number of entries played into slab journals */
	size_t entries_added_to_slab_journals;

	/* These fields are only used during read-only rebuild */
	page_count_t page_to_fetch;
	/* The number of leaf pages in the block map */
	page_count_t leaf_pages;
	/* The last slot of the block map */
	struct block_map_slot last_slot;

	/*
	 * The page completions used for playing the journal into the block map, and, during
	 * read-only rebuild, for rebuilding the reference counts from the block map.
	 */
	struct vdo_page_completion page_completions[];
};
/*
 * This is a min_heap callback function that orders numbered_block_mappings using the
 * 'block_map_slot' field as the primary key and the mapping 'number' field as the secondary
 * key. Using the mapping number preserves the journal order of entries for the same slot,
 * allowing us to sort by slot while still ensuring we replay all entries with the same slot
 * in the exact order as they appeared in the journal.
 */
static bool mapping_is_less_than(const void *item1, const void *item2)
{
	const struct numbered_block_mapping *mapping1 =
		(const struct numbered_block_mapping *) item1;
	const struct numbered_block_mapping *mapping2 =
		(const struct numbered_block_mapping *) item2;

	if (mapping1->block_map_slot.pbn != mapping2->block_map_slot.pbn)
		return mapping1->block_map_slot.pbn < mapping2->block_map_slot.pbn;

	if (mapping1->block_map_slot.slot != mapping2->block_map_slot.slot)
		return mapping1->block_map_slot.slot < mapping2->block_map_slot.slot;

	if (mapping1->number != mapping2->number)
		return mapping1->number < mapping2->number;

	return false;
}
static void swap_mappings(void *item1, void *item2)
{
	struct numbered_block_mapping *mapping1 = item1;
	struct numbered_block_mapping *mapping2 = item2;

	swap(*mapping1, *mapping2);
}
static const struct min_heap_callbacks repair_min_heap = {
	.elem_size = sizeof(struct numbered_block_mapping),
	.less = mapping_is_less_than,
	.swp = swap_mappings,
};
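/*
 * Illustrative sketch of how these callbacks are used (mirroring recover_block_map() below):
 * once the entry array is wrapped in a heap and heapified, the smallest mapping according to
 * mapping_is_less_than() sits at heap.data[0].
 *
 *	struct min_heap heap = {
 *		.data = entries,
 *		.nr = count,
 *		.size = count,
 *	};
 *	min_heapify_all(&heap, &repair_min_heap);
 */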
static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
{
	struct min_heap *heap = &repair->replay_heap;
	struct numbered_block_mapping *last;

	if (heap->nr == 0)
		return NULL;

	/*
	 * Swap the next heap element with the last one on the heap, popping it off the heap,
	 * then restore the heap invariant and return a pointer to the popped element.
	 */
	last = &repair->entries[--heap->nr];
	swap_mappings(heap->data, last);
	min_heapify(heap, 0, &repair_min_heap);
	return last;
}
/**
 * as_repair_completion() - Convert a generic completion to a repair_completion.
 * @completion: The completion to convert.
 *
 * Return: The repair_completion.
 */
static inline struct repair_completion * __must_check
as_repair_completion(struct vdo_completion *completion)
{
	vdo_assert_completion_type(completion, VDO_REPAIR_COMPLETION);
	return container_of(completion, struct repair_completion, completion);
}
static void prepare_repair_completion(struct repair_completion *repair,
				      vdo_action_fn callback, enum vdo_zone_type zone_type)
{
	struct vdo_completion *completion = &repair->completion;
	const struct thread_config *thread_config = &completion->vdo->thread_config;
	thread_id_t thread_id;

	/* All block map access is done on a single thread, so use logical zone 0. */
	thread_id = ((zone_type == VDO_ZONE_TYPE_LOGICAL) ?
		     thread_config->logical_threads[0] :
		     thread_config->admin_thread);
	vdo_reset_completion(completion);
	vdo_set_completion_callback(completion, callback, thread_id);
}
static void launch_repair_completion(struct repair_completion *repair,
				     vdo_action_fn callback, enum vdo_zone_type zone_type)
{
	prepare_repair_completion(repair, callback, zone_type);
	vdo_launch_completion(&repair->completion);
}
static void uninitialize_vios(struct repair_completion *repair)
{
	while (repair->vio_count > 0)
		free_vio_components(&repair->vios[--repair->vio_count]);

	vdo_free(vdo_forget(repair->vios));
}
static void free_repair_completion(struct repair_completion *repair)
{
	if (repair == NULL)
		return;

	/*
	 * We do this here because this function is the only common bottleneck for all clean up
	 * paths.
	 */
	repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;

	uninitialize_vios(repair);
	vdo_free(vdo_forget(repair->journal_data));
	vdo_free(vdo_forget(repair->entries));
	vdo_free(repair);
}
static void finish_repair(struct vdo_completion *completion)
{
	struct vdo_completion *parent = completion->parent;
	struct vdo *vdo = completion->vdo;
	struct repair_completion *repair = as_repair_completion(completion);

	vdo_assert_on_admin_thread(vdo, __func__);

	if (vdo->load_state != VDO_REBUILD_FOR_UPGRADE)
		vdo->states.vdo.complete_recoveries++;

	vdo_initialize_recovery_journal_post_repair(vdo->recovery_journal,
						    vdo->states.vdo.complete_recoveries,
						    repair->highest_tail,
						    repair->logical_blocks_used,
						    repair->block_map_data_blocks);
	free_repair_completion(vdo_forget(repair));

	if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
		vdo_log_info("Read-only rebuild complete");
		vdo_launch_completion(parent);
		return;
	}

	/* FIXME: shouldn't this say either "recovery" or "repair"? */
	vdo_log_info("Rebuild complete");

	/*
	 * Now that we've freed the repair completion and its vast array of journal entries, we
	 * can allocate refcounts.
	 */
	vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));
}
/**
 * abort_repair() - Handle a repair error.
 * @completion: The repair completion.
 */
static void abort_repair(struct vdo_completion *completion)
{
	struct vdo_completion *parent = completion->parent;
	int result = completion->result;
	struct repair_completion *repair = as_repair_completion(completion);

	if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
		vdo_log_info("Read-only rebuild aborted");
	else
		vdo_log_warning("Recovery aborted");

	free_repair_completion(vdo_forget(repair));
	vdo_continue_completion(parent, result);
}
/**
 * abort_on_error() - Abort a repair if there is an error.
 * @result: The result to check.
 * @repair: The repair completion.
 *
 * Return: true if the result was an error.
 */
static bool __must_check abort_on_error(int result, struct repair_completion *repair)
{
	if (result == VDO_SUCCESS)
		return false;

	vdo_fail_completion(&repair->completion, result);
	return true;
}
/**
 * drain_slab_depot() - Flush out all dirty refcount blocks now that they have been rebuilt or
 *                      recovered.
 * @completion: The repair completion.
 */
static void drain_slab_depot(struct vdo_completion *completion)
{
	struct vdo *vdo = completion->vdo;
	struct repair_completion *repair = as_repair_completion(completion);
	const struct admin_state_code *operation;

	vdo_assert_on_admin_thread(vdo, __func__);

	prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
	if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
		vdo_log_info("Saving rebuilt state");
		operation = VDO_ADMIN_STATE_REBUILDING;
	} else {
		vdo_log_info("Replayed %zu journal entries into slab journals",
			     repair->entries_added_to_slab_journals);
		operation = VDO_ADMIN_STATE_RECOVERING;
	}

	vdo_drain_slab_depot(vdo->depot, operation, completion);
}
/**
 * flush_block_map_updates() - Flush the block map now that all the reference counts are rebuilt.
 * @completion: The repair completion.
 *
 * This callback is registered in finish_if_done().
 */
static void flush_block_map_updates(struct vdo_completion *completion)
{
	vdo_assert_on_admin_thread(completion->vdo, __func__);

	vdo_log_info("Flushing block map changes");
	prepare_repair_completion(as_repair_completion(completion), drain_slab_depot,
				  VDO_ZONE_TYPE_ADMIN);
	vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
			    completion);
}
static bool fetch_page(struct repair_completion *repair,
		       struct vdo_completion *completion);
/**
 * handle_page_load_error() - Handle an error loading a page.
 * @completion: The vdo_page_completion.
 */
static void handle_page_load_error(struct vdo_completion *completion)
{
	struct repair_completion *repair = completion->parent;

	repair->outstanding--;
	vdo_set_completion_result(&repair->completion, completion->result);
	vdo_release_page_completion(completion);
	fetch_page(repair, completion);
}
/**
 * unmap_entry() - Unmap an invalid entry and indicate that its page must be written out.
 * @page: The page containing the entries.
 * @completion: The page_completion for writing the page.
 * @slot: The slot to unmap.
 */
static void unmap_entry(struct block_map_page *page, struct vdo_completion *completion,
			slot_number_t slot)
{
	page->entries[slot] = UNMAPPED_BLOCK_MAP_ENTRY;
	vdo_request_page_write(completion);
}
/**
 * remove_out_of_bounds_entries() - Unmap entries which lie outside the logical space.
 * @page: The page containing the entries.
 * @completion: The page_completion for writing the page.
 * @start: The first slot to check.
 */
static void remove_out_of_bounds_entries(struct block_map_page *page,
					 struct vdo_completion *completion,
					 slot_number_t start)
{
	slot_number_t slot;

	for (slot = start; slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; slot++) {
		struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);

		if (vdo_is_mapped_location(&mapping))
			unmap_entry(page, completion, slot);
	}
}
/**
 * process_slot() - Update the reference counts for a single entry.
 * @page: The page containing the entries.
 * @completion: The page_completion for writing the page.
 * @slot: The slot to check.
 *
 * Return: true if the entry was a valid mapping.
 */
static bool process_slot(struct block_map_page *page, struct vdo_completion *completion,
			 slot_number_t slot)
{
	struct slab_depot *depot = completion->vdo->depot;
	int result;
	struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);

	if (!vdo_is_valid_location(&mapping)) {
		/* This entry is invalid, so remove it from the page. */
		unmap_entry(page, completion, slot);
		return false;
	}

	if (!vdo_is_mapped_location(&mapping))
		return false;

	if (mapping.pbn == VDO_ZERO_BLOCK)
		return true;

	if (!vdo_is_physical_data_block(depot, mapping.pbn)) {
		/*
		 * This is a nonsense mapping. Remove it from the map so we're at least consistent
		 * and mark the page dirty.
		 */
		unmap_entry(page, completion, slot);
		return false;
	}

	result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,
							VDO_JOURNAL_DATA_REMAPPING);
	if (result == VDO_SUCCESS)
		return true;

	vdo_log_error_strerror(result,
			       "Could not adjust reference count for PBN %llu, slot %u mapped to PBN %llu",
			       (unsigned long long) vdo_get_block_map_page_pbn(page),
			       slot, (unsigned long long) mapping.pbn);
	unmap_entry(page, completion, slot);
	return false;
}
/**
 * rebuild_reference_counts_from_page() - Rebuild reference counts from a block map page.
 * @repair: The repair completion.
 * @completion: The page completion holding the page.
 */
static void rebuild_reference_counts_from_page(struct repair_completion *repair,
					       struct vdo_completion *completion)
{
	slot_number_t slot, last_slot;
	struct block_map_page *page;
	int result;

	result = vdo_get_cached_page(completion, &page);
	if (result != VDO_SUCCESS) {
		vdo_set_completion_result(&repair->completion, result);
		return;
	}

	if (!page->header.initialized)
		return;

	/* Remove any bogus entries which exist beyond the end of the logical space. */
	if (vdo_get_block_map_page_pbn(page) == repair->last_slot.pbn) {
		last_slot = repair->last_slot.slot;
		remove_out_of_bounds_entries(page, completion, last_slot);
	} else {
		last_slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
	}

	/* Inform the slab depot of all entries on this page. */
	for (slot = 0; slot < last_slot; slot++) {
		if (process_slot(page, completion, slot))
			repair->logical_blocks_used++;
	}
}
/**
 * page_loaded() - Process a page which has just been loaded.
 * @completion: The vdo_page_completion for the fetched page.
 *
 * This callback is registered by fetch_page().
 */
static void page_loaded(struct vdo_completion *completion)
{
	struct repair_completion *repair = completion->parent;

	repair->outstanding--;
	rebuild_reference_counts_from_page(repair, completion);
	vdo_release_page_completion(completion);

	/* Advance progress to the next page, and fetch the next page we haven't yet requested. */
	fetch_page(repair, completion);
}
static physical_block_number_t get_pbn_to_fetch(struct repair_completion *repair,
						struct block_map *block_map)
{
	physical_block_number_t pbn = VDO_ZERO_BLOCK;

	if (repair->completion.result != VDO_SUCCESS)
		return VDO_ZERO_BLOCK;

	while ((pbn == VDO_ZERO_BLOCK) && (repair->page_to_fetch < repair->leaf_pages))
		pbn = vdo_find_block_map_page_pbn(block_map, repair->page_to_fetch++);

	if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))
		return pbn;

	vdo_set_completion_result(&repair->completion, VDO_BAD_MAPPING);
	return VDO_ZERO_BLOCK;
}
/**
 * fetch_page() - Fetch a page from the block map.
 * @repair: The repair_completion.
 * @completion: The page completion to use.
 *
 * Return: true if the rebuild is complete.
 */
static bool fetch_page(struct repair_completion *repair,
		       struct vdo_completion *completion)
{
	struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
	struct block_map *block_map = repair->completion.vdo->block_map;
	physical_block_number_t pbn = get_pbn_to_fetch(repair, block_map);

	if (pbn != VDO_ZERO_BLOCK) {
		repair->outstanding++;
		/*
		 * We must set the requeue flag here to ensure that we don't blow the stack if all
		 * the requested pages are already in the cache or get load errors.
		 */
		vdo_get_page(page_completion, &block_map->zones[0], pbn, true, repair,
			     page_loaded, handle_page_load_error, true);
		return false;
	}

	if (repair->outstanding > 0)
		return false;

	launch_repair_completion(repair, flush_block_map_updates, VDO_ZONE_TYPE_ADMIN);
	return true;
}
/**
 * rebuild_from_leaves() - Rebuild reference counts from the leaf block map pages.
 * @completion: The repair completion.
 *
 * Rebuilds reference counts from the leaf block map pages now that reference counts have been
 * rebuilt from the interior tree pages (which have been loaded in the process). This callback is
 * registered in rebuild_reference_counts().
 */
static void rebuild_from_leaves(struct vdo_completion *completion)
{
	page_count_t i;
	struct repair_completion *repair = as_repair_completion(completion);
	struct block_map *map = completion->vdo->block_map;

	repair->logical_blocks_used = 0;

	/*
	 * The PBN calculation doesn't work until the tree pages have been loaded, so we can't set
	 * this value at the start of repair.
	 */
	repair->leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
	repair->last_slot = (struct block_map_slot) {
		.slot = map->entry_count % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
		.pbn = vdo_find_block_map_page_pbn(map, repair->leaf_pages - 1),
	};
	if (repair->last_slot.slot == 0)
		repair->last_slot.slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
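	/*
	 * Worked example (hypothetical geometry, assuming 812 entries per page): a map with
	 * entry_count = 2030 has 3 leaf pages and last_slot.slot = 2030 % 812 = 406; an exact
	 * multiple such as 2436 would yield slot 0, corrected above to a full page of 812.
	 */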
	for (i = 0; i < repair->page_count; i++) {
		if (fetch_page(repair, &repair->page_completions[i].completion)) {
			/*
			 * The rebuild has already moved on, so it isn't safe nor is there a need
			 * to launch any more fetches.
			 */
			return;
		}
	}
}
/**
 * process_entry() - Process a single entry from the block map tree.
 * @pbn: A pbn which holds a block map tree page.
 * @completion: The parent completion of the traversal.
 *
 * Implements vdo_entry_callback_fn.
 *
 * Return: VDO_SUCCESS or an error.
 */
static int process_entry(physical_block_number_t pbn, struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion);
	struct slab_depot *depot = completion->vdo->depot;
	int result;

	if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {
		return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
					      "PBN %llu out of range",
					      (unsigned long long) pbn);
	}

	result = vdo_adjust_reference_count_for_rebuild(depot, pbn,
							VDO_JOURNAL_BLOCK_MAP_REMAPPING);
	if (result != VDO_SUCCESS) {
		return vdo_log_error_strerror(result,
					      "Could not adjust reference count for block map tree PBN %llu",
					      (unsigned long long) pbn);
	}

	repair->block_map_data_blocks++;
	return VDO_SUCCESS;
}
static void rebuild_reference_counts(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion);
	struct vdo *vdo = completion->vdo;
	struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;

	/* We must allocate ref_counts before we can rebuild them. */
	if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))
		return;

	/*
	 * Completion chaining from page cache hits can lead to stack overflow during the rebuild,
	 * so clear out the cache before this rebuild phase.
	 */
	if (abort_on_error(vdo_invalidate_page_cache(cache), repair))
		return;

	prepare_repair_completion(repair, rebuild_from_leaves, VDO_ZONE_TYPE_LOGICAL);
	vdo_traverse_forest(vdo->block_map, process_entry, completion);
}
/**
 * increment_recovery_point() - Move the given recovery point forward by one entry.
 * @point: The recovery point to advance.
 */
static void increment_recovery_point(struct recovery_point *point)
{
	if (++point->entry_count < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR)
		return;

	point->entry_count = 0;
	if (point->sector_count < (VDO_SECTORS_PER_BLOCK - 1)) {
		point->sector_count++;
		return;
	}

	point->sequence_number++;
	point->sector_count = 1;
}
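/*
 * Worked example (illustrative): a point at the last entry of the last sector of block n
 * advances to { .sequence_number = n + 1, .sector_count = 1, .entry_count = 0 }; sector
 * counts restart at 1 because sector 0 of each journal block holds the block header.
 */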
/**
 * advance_points() - Advance the current recovery and journal points.
 * @repair: The repair_completion whose points are to be advanced.
 * @entries_per_block: The number of entries in a recovery journal block.
 */
static void advance_points(struct repair_completion *repair,
			   journal_entry_count_t entries_per_block)
{
	if (!repair->next_recovery_point.increment_applied) {
		repair->next_recovery_point.increment_applied = true;
		return;
	}

	increment_recovery_point(&repair->next_recovery_point);
	vdo_advance_journal_point(&repair->next_journal_point, entries_per_block);
	repair->next_recovery_point.increment_applied = false;
}
/**
 * before_recovery_point() - Check whether the first point precedes the second point.
 * @first: The first recovery point.
 * @second: The second recovery point.
 *
 * Return: true if the first point precedes the second point.
 */
static bool __must_check before_recovery_point(const struct recovery_point *first,
					       const struct recovery_point *second)
{
	if (first->sequence_number < second->sequence_number)
		return true;

	if (first->sequence_number > second->sequence_number)
		return false;

	if (first->sector_count < second->sector_count)
		return true;

	return ((first->sector_count == second->sector_count) &&
		(first->entry_count < second->entry_count));
}
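/*
 * For instance (sequence, sector, entry): {5, 2, 0} precedes {5, 2, 1}, which precedes
 * {5, 3, 0}, which precedes {6, 1, 0}.
 */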
static struct packed_journal_sector * __must_check get_sector(struct recovery_journal *journal,
							      char *journal_data,
							      sequence_number_t sequence,
							      u8 sector_number)
{
	off_t offset;

	offset = ((vdo_get_recovery_journal_block_number(journal, sequence) * VDO_BLOCK_SIZE) +
		  (VDO_SECTOR_SIZE * sector_number));
	return (struct packed_journal_sector *) (journal_data + offset);
}
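/*
 * For example (illustrative numbers, assuming 4096-byte blocks and 512-byte sectors): sector 3
 * of the journal block holding sequence number 7 starts
 * (block_number(7) * 4096) + (512 * 3) bytes into journal_data.
 */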
/**
 * get_entry() - Unpack the recovery journal entry associated with the given recovery point.
 * @repair: The repair completion.
 * @point: The recovery point.
 *
 * Return: The unpacked contents of the matching recovery journal entry.
 */
static struct recovery_journal_entry get_entry(const struct repair_completion *repair,
					       const struct recovery_point *point)
{
	struct packed_journal_sector *sector;

	sector = get_sector(repair->completion.vdo->recovery_journal,
			    repair->journal_data, point->sequence_number,
			    point->sector_count);
	return vdo_unpack_recovery_journal_entry(&sector->entries[point->entry_count]);
}
/**
 * validate_recovery_journal_entry() - Validate a recovery journal entry.
 * @vdo: The vdo.
 * @entry: The entry to validate.
 *
 * Return: VDO_SUCCESS or an error.
 */
static int validate_recovery_journal_entry(const struct vdo *vdo,
					   const struct recovery_journal_entry *entry)
{
	if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) ||
	    (entry->slot.slot >= VDO_BLOCK_MAP_ENTRIES_PER_PAGE) ||
	    !vdo_is_valid_location(&entry->mapping) ||
	    !vdo_is_valid_location(&entry->unmapping) ||
	    !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
	    !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) {
		return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
					      "Invalid entry: %s (%llu, %u) from %llu to %llu is not within bounds",
					      vdo_get_journal_operation_name(entry->operation),
					      (unsigned long long) entry->slot.pbn,
					      entry->slot.slot,
					      (unsigned long long) entry->unmapping.pbn,
					      (unsigned long long) entry->mapping.pbn);
	}

	if ((entry->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) &&
	    (vdo_is_state_compressed(entry->mapping.state) ||
	     (entry->mapping.pbn == VDO_ZERO_BLOCK) ||
	     (entry->unmapping.state != VDO_MAPPING_STATE_UNMAPPED) ||
	     (entry->unmapping.pbn != VDO_ZERO_BLOCK))) {
		return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
					      "Invalid entry: %s (%llu, %u) from %llu to %llu is not a valid tree mapping",
					      vdo_get_journal_operation_name(entry->operation),
					      (unsigned long long) entry->slot.pbn,
					      entry->slot.slot,
					      (unsigned long long) entry->unmapping.pbn,
					      (unsigned long long) entry->mapping.pbn);
	}

	return VDO_SUCCESS;
}
/**
 * add_slab_journal_entries() - Replay recovery journal entries into the slab journals of the
 *                              allocator currently being recovered.
 * @completion: The allocator completion.
 *
 * Waits for slab journal tailblock space when necessary. This method is its own callback.
 */
static void add_slab_journal_entries(struct vdo_completion *completion)
{
	struct recovery_point *recovery_point;
	struct repair_completion *repair = completion->parent;
	struct vdo *vdo = completion->vdo;
	struct recovery_journal *journal = vdo->recovery_journal;
	struct block_allocator *allocator = vdo_as_block_allocator(completion);

	/* Get ready in case we need to enqueue again. */
	vdo_prepare_completion(completion, add_slab_journal_entries,
			       vdo_notify_slab_journals_are_recovered,
			       completion->callback_thread_id, repair);
	for (recovery_point = &repair->next_recovery_point;
	     before_recovery_point(recovery_point, &repair->tail_recovery_point);
	     advance_points(repair, journal->entries_per_block)) {
		int result;
		physical_block_number_t pbn;
		struct vdo_slab *slab;
		struct recovery_journal_entry entry = get_entry(repair, recovery_point);
		bool increment = !repair->next_recovery_point.increment_applied;

		result = validate_recovery_journal_entry(vdo, &entry);
		if (result != VDO_SUCCESS) {
			vdo_enter_read_only_mode(vdo, result);
			vdo_fail_completion(completion, result);
			return;
		}

		if (increment)
			pbn = entry.mapping.pbn;
		else
			pbn = entry.unmapping.pbn;

		if (pbn == VDO_ZERO_BLOCK)
			continue;

		slab = vdo_get_slab(vdo->depot, pbn);
		if (slab->allocator != allocator)
			continue;

		if (!vdo_attempt_replay_into_slab(slab, pbn, entry.operation, increment,
						  &repair->next_journal_point,
						  completion))
			return;

		repair->entries_added_to_slab_journals++;
	}

	vdo_notify_slab_journals_are_recovered(completion);
}
/**
 * vdo_replay_into_slab_journals() - Replay recovery journal entries in the slab journals of slabs
 *                                   owned by a given block_allocator.
 * @allocator: The allocator whose slab journals are to be recovered.
 * @context: The slab depot load context supplied by a recovery when it loads the depot.
 */
void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *context)
{
	struct vdo_completion *completion = &allocator->completion;
	struct repair_completion *repair = context;
	struct vdo *vdo = completion->vdo;

	vdo_assert_on_physical_zone_thread(vdo, allocator->zone_number, __func__);
	if (repair->entry_count == 0) {
		/* There's nothing to replay. */
		repair->logical_blocks_used = vdo->recovery_journal->logical_blocks_used;
		repair->block_map_data_blocks = vdo->recovery_journal->block_map_data_blocks;
		vdo_notify_slab_journals_are_recovered(completion);
		return;
	}

	repair->next_recovery_point = (struct recovery_point) {
		.sequence_number = repair->slab_journal_head,
		.sector_count = 1,
		.entry_count = 0,
	};

	repair->next_journal_point = (struct journal_point) {
		.sequence_number = repair->slab_journal_head,
		.entry_count = 0,
	};

	vdo_log_info("Replaying entries into slab journals for zone %u",
		     allocator->zone_number);
	completion->parent = repair;
	add_slab_journal_entries(completion);
}
static void load_slab_depot(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion);
	const struct admin_state_code *operation;

	vdo_assert_on_admin_thread(completion->vdo, __func__);

	if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) {
		prepare_repair_completion(repair, rebuild_reference_counts,
					  VDO_ZONE_TYPE_LOGICAL);
		operation = VDO_ADMIN_STATE_LOADING_FOR_REBUILD;
	} else {
		prepare_repair_completion(repair, drain_slab_depot, VDO_ZONE_TYPE_ADMIN);
		operation = VDO_ADMIN_STATE_LOADING_FOR_RECOVERY;
	}

	vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair);
}
static void flush_block_map(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion);
	const struct admin_state_code *operation;

	vdo_assert_on_admin_thread(completion->vdo, __func__);

	vdo_log_info("Flushing block map changes");
	prepare_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
	operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
		     VDO_ADMIN_STATE_REBUILDING :
		     VDO_ADMIN_STATE_RECOVERING);
	vdo_drain_block_map(completion->vdo->block_map, operation, completion);
}
static bool finish_if_done(struct repair_completion *repair)
{
	/* Pages are still being launched or there is still work to do. */
	if (repair->launching || (repair->outstanding > 0))
		return false;

	if (repair->completion.result != VDO_SUCCESS) {
		page_count_t i;

		for (i = 0; i < repair->page_count; i++) {
			struct vdo_page_completion *page_completion =
				&repair->page_completions[i];

			if (page_completion->ready)
				vdo_release_page_completion(&page_completion->completion);
		}

		vdo_launch_completion(&repair->completion);
		return true;
	}

	if (repair->current_entry >= repair->entries)
		return false;

	launch_repair_completion(repair, flush_block_map, VDO_ZONE_TYPE_ADMIN);
	return true;
}
static void abort_block_map_recovery(struct repair_completion *repair, int result)
{
	vdo_set_completion_result(&repair->completion, result);
	finish_if_done(repair);
}
/**
 * find_entry_starting_next_page() - Find the first journal entry after a given entry which is not
 *                                   on the same block map page.
 * @repair: The repair completion.
 * @current_entry: The entry to search from.
 * @needs_sort: Whether sorting is needed to proceed.
 *
 * Return: Pointer to the first later journal entry on a different block map page, or a pointer to
 *         just before the journal entries if no subsequent entry is on a different block map page.
 */
static struct numbered_block_mapping *
find_entry_starting_next_page(struct repair_completion *repair,
			      struct numbered_block_mapping *current_entry, bool needs_sort)
{
	size_t current_page;

	/* If current_entry is invalid, return immediately. */
	if (current_entry < repair->entries)
		return current_entry;

	current_page = current_entry->block_map_slot.pbn;

	/* Decrement current_entry until it's out of bounds or on a different page. */
	while ((current_entry >= repair->entries) &&
	       (current_entry->block_map_slot.pbn == current_page)) {
		if (needs_sort) {
			struct numbered_block_mapping *just_sorted_entry =
				sort_next_heap_element(repair);
			VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
					    "heap is returning elements in an unexpected order");
		}

		current_entry--;
	}

	return current_entry;
}
/*
 * Apply a range of journal entries [starting_entry, ending_entry) to a block map page.
 */
static void apply_journal_entries_to_page(struct block_map_page *page,
					  struct numbered_block_mapping *starting_entry,
					  struct numbered_block_mapping *ending_entry)
{
	struct numbered_block_mapping *current_entry = starting_entry;

	while (current_entry != ending_entry) {
		page->entries[current_entry->block_map_slot.slot] = current_entry->block_map_entry;
		current_entry--;
	}
}
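/*
 * Illustrative sketch: if entries[4..8) all target the block map page at PBN p, one fetch of p
 * suffices to apply all four mappings:
 *
 *	apply_journal_entries_to_page(page, &entries[7], &entries[3]);
 *
 * (The entry array is walked downward, so the starting entry is the highest-numbered one and
 * the ending entry is just below the range.)
 */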
static void recover_ready_pages(struct repair_completion *repair,
				struct vdo_completion *completion);
static void block_map_page_loaded(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion->parent);

	repair->outstanding--;
	if (!repair->launching)
		recover_ready_pages(repair, completion);
}
static void handle_block_map_page_load_error(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion->parent);

	repair->outstanding--;
	abort_block_map_recovery(repair, completion->result);
}
static void fetch_block_map_page(struct repair_completion *repair,
				 struct vdo_completion *completion)
{
	physical_block_number_t pbn;

	if (repair->current_unfetched_entry < repair->entries)
		/* Nothing left to fetch. */
		return;

	/* Fetch the next page we haven't yet requested. */
	pbn = repair->current_unfetched_entry->block_map_slot.pbn;
	repair->current_unfetched_entry =
		find_entry_starting_next_page(repair, repair->current_unfetched_entry,
					      true);
	repair->outstanding++;
	vdo_get_page(((struct vdo_page_completion *) completion),
		     &repair->completion.vdo->block_map->zones[0], pbn, true,
		     &repair->completion, block_map_page_loaded,
		     handle_block_map_page_load_error, false);
}
static struct vdo_page_completion *get_next_page_completion(struct repair_completion *repair,
							    struct vdo_page_completion *completion)
{
	completion++;
	if (completion == (&repair->page_completions[repair->page_count]))
		completion = &repair->page_completions[0];

	return completion;
}
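/*
 * The page completions above form a fixed-size ring: advancing past the last slot wraps back
 * to slot 0, so at most page_count page fetches are in flight at any time.
 */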
static void recover_ready_pages(struct repair_completion *repair,
				struct vdo_completion *completion)
{
	struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;

	if (finish_if_done(repair))
		return;

	if (repair->pbn != page_completion->pbn)
		return;

	while (page_completion->ready) {
		struct numbered_block_mapping *start_of_next_page;
		struct block_map_page *page;
		int result;

		result = vdo_get_cached_page(completion, &page);
		if (result != VDO_SUCCESS) {
			abort_block_map_recovery(repair, result);
			return;
		}

		start_of_next_page =
			find_entry_starting_next_page(repair, repair->current_entry,
						      false);
		apply_journal_entries_to_page(page, repair->current_entry,
					      start_of_next_page);
		repair->current_entry = start_of_next_page;
		vdo_request_page_write(completion);
		vdo_release_page_completion(completion);

		if (finish_if_done(repair))
			return;

		repair->pbn = repair->current_entry->block_map_slot.pbn;
		fetch_block_map_page(repair, completion);
		page_completion = get_next_page_completion(repair, page_completion);
		completion = &page_completion->completion;
	}
}
static void recover_block_map(struct vdo_completion *completion)
{
	struct repair_completion *repair = as_repair_completion(completion);
	struct vdo *vdo = completion->vdo;
	struct numbered_block_mapping *first_sorted_entry;
	page_count_t i;

	vdo_assert_on_logical_zone_thread(vdo, 0, __func__);

	/* Suppress block map errors. */
	vdo->block_map->zones[0].page_cache.rebuilding =
		vdo_state_requires_read_only_rebuild(vdo->load_state);

	if (repair->block_map_entry_count == 0) {
		vdo_log_info("Replaying 0 recovery entries into block map");
		vdo_free(vdo_forget(repair->journal_data));
		launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
		return;
	}

	/*
	 * Organize the journal entries into a binary heap so we can iterate over them in sorted
	 * order incrementally, avoiding an expensive sort call.
	 */
	repair->replay_heap = (struct min_heap) {
		.data = repair->entries,
		.nr = repair->block_map_entry_count,
		.size = repair->block_map_entry_count,
	};
	min_heapify_all(&repair->replay_heap, &repair_min_heap);

	vdo_log_info("Replaying %zu recovery entries into block map",
		     repair->block_map_entry_count);

	repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
	first_sorted_entry = sort_next_heap_element(repair);
	VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
			    "heap is returning elements in an unexpected order");

	/* Prevent any page from being processed until all pages have been launched. */
	repair->launching = true;
	repair->pbn = repair->current_entry->block_map_slot.pbn;
	repair->current_unfetched_entry = repair->current_entry;
	for (i = 0; i < repair->page_count; i++) {
		if (repair->current_unfetched_entry < repair->entries)
			break;

		fetch_block_map_page(repair, &repair->page_completions[i].completion);
	}
	repair->launching = false;

	/* Process any ready pages. */
	recover_ready_pages(repair, &repair->page_completions[0].completion);
}
/**
 * get_recovery_journal_block_header() - Get the block header for a block at a position in the
 *                                       journal data and unpack it.
 * @journal: The recovery journal.
 * @data: The recovery journal data.
 * @sequence: The sequence number.
 *
 * Return: The unpacked header.
 */
static struct recovery_block_header __must_check
get_recovery_journal_block_header(struct recovery_journal *journal, char *data,
				  sequence_number_t sequence)
{
	physical_block_number_t pbn =
		vdo_get_recovery_journal_block_number(journal, sequence);
	char *header = &data[pbn * VDO_BLOCK_SIZE];

	return vdo_unpack_recovery_block_header((struct packed_journal_header *) header);
}
/**
 * is_valid_recovery_journal_block() - Determine whether the given header describes a valid block
 *                                     for the given journal.
 * @journal: The journal to use.
 * @header: The unpacked block header to check.
 * @old_ok: Whether an old format header is valid.
 *
 * A block is not valid if it is unformatted, or if it is older than the last successful recovery
 * or reformat.
 *
 * Return: true if the header is valid.
 */
static bool __must_check is_valid_recovery_journal_block(const struct recovery_journal *journal,
							 const struct recovery_block_header *header,
							 bool old_ok)
{
	if ((header->nonce != journal->nonce) ||
	    (header->recovery_count != journal->recovery_count))
		return false;

	if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
		return (header->entry_count <= journal->entries_per_block);

	return (old_ok &&
		(header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL) &&
		(header->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK));
}
/**
 * is_exact_recovery_journal_block() - Determine whether the given header describes the exact block
 *                                     indicated.
 * @journal: The journal to use.
 * @header: The unpacked block header to check.
 * @sequence: The expected sequence number.
 * @type: The expected metadata type.
 *
 * Return: true if the block matches.
 */
static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal,
							 const struct recovery_block_header *header,
							 sequence_number_t sequence,
							 enum vdo_metadata_type type)
{
	return ((header->metadata_type == type) &&
		(header->sequence_number == sequence) &&
		(is_valid_recovery_journal_block(journal, header, true)));
}
/**
 * find_recovery_journal_head_and_tail() - Find the tail and head of the journal.
 * @repair: The repair completion.
 *
 * Return: true if there were valid journal blocks.
 */
static bool find_recovery_journal_head_and_tail(struct repair_completion *repair)
{
	struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
	bool found_entries = false;
	physical_block_number_t i;

	/*
	 * Ensure that we don't replay old entries since we know the tail recorded in the super
	 * block must be a lower bound. Not doing so can result in extra data loss by setting the
	 * tail too early.
	 */
	repair->highest_tail = journal->tail;
	for (i = 0; i < journal->size; i++) {
		struct recovery_block_header header =
			get_recovery_journal_block_header(journal, repair->journal_data, i);

		if (!is_valid_recovery_journal_block(journal, &header, true)) {
			/* This block is old or incorrectly formatted */
			continue;
		}

		if (vdo_get_recovery_journal_block_number(journal, header.sequence_number) != i) {
			/* This block is in the wrong location */
			continue;
		}

		if (header.sequence_number >= repair->highest_tail) {
			found_entries = true;
			repair->highest_tail = header.sequence_number;
		}

		if (!found_entries)
			continue;

		if (header.block_map_head > repair->block_map_head)
			repair->block_map_head = header.block_map_head;

		if (header.slab_journal_head > repair->slab_journal_head)
			repair->slab_journal_head = header.slab_journal_head;
	}

	return found_entries;
}
/**
 * unpack_entry() - Unpack a recovery journal entry in either format.
 * @vdo: The vdo.
 * @packed: The entry to unpack.
 * @format: The expected format of the entry.
 * @entry: The unpacked entry.
 *
 * Return: true if the entry should be applied.
 */
static bool unpack_entry(struct vdo *vdo, char *packed, enum vdo_metadata_type format,
			 struct recovery_journal_entry *entry)
{
	if (format == VDO_METADATA_RECOVERY_JOURNAL_2) {
		struct packed_recovery_journal_entry *packed_entry =
			(struct packed_recovery_journal_entry *) packed;

		*entry = vdo_unpack_recovery_journal_entry(packed_entry);
	} else {
		physical_block_number_t low32, high4;

		struct packed_recovery_journal_entry_1 *packed_entry =
			(struct packed_recovery_journal_entry_1 *) packed;

		if (packed_entry->operation == VDO_JOURNAL_DATA_INCREMENT)
			entry->operation = VDO_JOURNAL_DATA_REMAPPING;
		else if (packed_entry->operation == VDO_JOURNAL_BLOCK_MAP_INCREMENT)
			entry->operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING;
		else
			return false;

		low32 = __le32_to_cpu(packed_entry->pbn_low_word);
		high4 = packed_entry->pbn_high_nibble;
		entry->slot = (struct block_map_slot) {
			.pbn = ((high4 << 32) | low32),
			.slot = (packed_entry->slot_low | (packed_entry->slot_high << 6)),
		};
		entry->mapping = vdo_unpack_block_map_entry(&packed_entry->block_map_entry);
		entry->unmapping = (struct data_location) {
			.pbn = VDO_ZERO_BLOCK,
			.state = VDO_MAPPING_STATE_UNMAPPED,
		};
	}

	return (validate_recovery_journal_entry(vdo, entry) == VDO_SUCCESS);
}
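/*
 * Sketch of the format-1 PBN reassembly above (hypothetical values): a packed entry with
 * pbn_low_word = 0x10 and pbn_high_nibble = 0x2 unpacks to
 * pbn = (0x2ULL << 32) | 0x10 = 0x200000010; likewise slot_low and slot_high recombine as
 * (slot_low | (slot_high << 6)).
 */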
/**
 * append_sector_entries() - Append an array of recovery journal entries from a journal block
 *                           sector to the array of numbered mappings in the repair completion,
 *                           numbering each entry in the order they are appended.
 * @repair: The repair completion.
 * @entries: The entries in the sector.
 * @format: The format of the sector.
 * @entry_count: The number of entries to append.
 */
static void append_sector_entries(struct repair_completion *repair, char *entries,
				  enum vdo_metadata_type format,
				  journal_entry_count_t entry_count)
{
	journal_entry_count_t i;
	struct vdo *vdo = repair->completion.vdo;
	off_t increment = ((format == VDO_METADATA_RECOVERY_JOURNAL_2)
			   ? sizeof(struct packed_recovery_journal_entry)
			   : sizeof(struct packed_recovery_journal_entry_1));

	for (i = 0; i < entry_count; i++, entries += increment) {
		struct recovery_journal_entry entry;

		if (!unpack_entry(vdo, entries, format, &entry))
			/* When recovering from read-only mode, ignore damaged entries. */
			continue;

		repair->entries[repair->block_map_entry_count] =
			(struct numbered_block_mapping) {
			.block_map_slot = entry.slot,
			.block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
								    entry.mapping.state),
			.number = repair->block_map_entry_count,
		};
		repair->block_map_entry_count++;
	}
}
static journal_entry_count_t entries_per_sector(enum vdo_metadata_type format,
						u8 sector_number)
{
	if (format == VDO_METADATA_RECOVERY_JOURNAL_2)
		return RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;

	return ((sector_number == (VDO_SECTORS_PER_BLOCK - 1))
		? RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR
		: RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR);
}
static void extract_entries_from_block(struct repair_completion *repair,
				       struct recovery_journal *journal,
				       sequence_number_t sequence,
				       enum vdo_metadata_type format,
				       journal_entry_count_t entries)
{
	u8 i;
	struct recovery_block_header header =
		get_recovery_journal_block_header(journal, repair->journal_data,
						  sequence);

	if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) {
		/* This block is invalid, so skip it. */
		return;
	}

	entries = min(entries, header.entry_count);
	for (i = 1; i < VDO_SECTORS_PER_BLOCK; i++) {
		struct packed_journal_sector *sector =
			get_sector(journal, repair->journal_data, sequence, i);
		journal_entry_count_t sector_entries =
			min(entries, entries_per_sector(format, i));

		if (vdo_is_valid_recovery_journal_sector(&header, sector, i)) {
			/* Only extract as many as the block header calls for. */
			append_sector_entries(repair, (char *) sector->entries, format,
					      min_t(journal_entry_count_t,
						    sector->entry_count,
						    sector_entries));
		}

		/*
		 * Even if the sector wasn't full, count it as full when counting up to the
		 * entry count the block header claims.
		 */
		entries -= sector_entries;
	}
}
static int parse_journal_for_rebuild(struct repair_completion *repair)
{
	int result;
	sequence_number_t i;
	block_count_t count;
	enum vdo_metadata_type format;
	struct vdo *vdo = repair->completion.vdo;
	struct recovery_journal *journal = vdo->recovery_journal;
	journal_entry_count_t entries_per_block = journal->entries_per_block;

	format = get_recovery_journal_block_header(journal, repair->journal_data,
						   repair->highest_tail).metadata_type;
	if (format == VDO_METADATA_RECOVERY_JOURNAL)
		entries_per_block = RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK;

	/*
	 * Allocate an array of numbered_block_mapping structures large enough to transcribe every
	 * packed_recovery_journal_entry from every valid journal block.
	 */
	count = ((repair->highest_tail - repair->block_map_head + 1) * entries_per_block);
	result = vdo_allocate(count, struct numbered_block_mapping, __func__,
			      &repair->entries);
	if (result != VDO_SUCCESS)
		return result;

	for (i = repair->block_map_head; i <= repair->highest_tail; i++)
		extract_entries_from_block(repair, journal, i, format, entries_per_block);

	return VDO_SUCCESS;
}
static int validate_heads(struct repair_completion *repair)
{
	/* Both reap heads must be behind the tail. */
	if ((repair->block_map_head <= repair->tail) &&
	    (repair->slab_journal_head <= repair->tail))
		return VDO_SUCCESS;

	return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
				      "Journal tail too early. block map head: %llu, slab journal head: %llu, tail: %llu",
				      (unsigned long long) repair->block_map_head,
				      (unsigned long long) repair->slab_journal_head,
				      (unsigned long long) repair->tail);
}
/**
 * extract_new_mappings() - Find all valid new mappings to be applied to the block map.
 * @repair: The repair completion.
 *
 * The mappings are extracted from the journal and stored in a sortable array so that all of the
 * mappings to be applied to a given block map page can be done in a single page fetch.
 */
static int extract_new_mappings(struct repair_completion *repair)
{
	int result;
	struct vdo *vdo = repair->completion.vdo;
	struct recovery_point recovery_point = {
		.sequence_number = repair->block_map_head,
		.sector_count = 1,
		.entry_count = 0,
	};

	/*
	 * Allocate an array of numbered_block_mapping structs just large enough to transcribe
	 * every packed_recovery_journal_entry from every valid journal block.
	 */
	result = vdo_allocate(repair->entry_count, struct numbered_block_mapping,
			      __func__, &repair->entries);
	if (result != VDO_SUCCESS)
		return result;

	for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
	     increment_recovery_point(&recovery_point)) {
		struct recovery_journal_entry entry = get_entry(repair, &recovery_point);

		result = validate_recovery_journal_entry(vdo, &entry);
		if (result != VDO_SUCCESS) {
			vdo_enter_read_only_mode(vdo, result);
			return result;
		}

		repair->entries[repair->block_map_entry_count] =
			(struct numbered_block_mapping) {
			.block_map_slot = entry.slot,
			.block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
								    entry.mapping.state),
			.number = repair->block_map_entry_count,
		};
		repair->block_map_entry_count++;
	}

	result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
			    "approximate entry count is an upper bound");
	if (result != VDO_SUCCESS)
		vdo_enter_read_only_mode(vdo, result);

	return result;
}
/**
 * compute_usages() - Compute the LBNs in use and block map data blocks counts from the tail of
 *                    the journal.
 * @repair: The repair completion.
 */
static noinline int compute_usages(struct repair_completion *repair)
{
	/*
	 * This function is declared noinline to avoid a spurious valgrind error regarding the
	 * following structure being uninitialized.
	 */
	struct recovery_point recovery_point = {
		.sequence_number = repair->tail,
		.sector_count = 1,
		.entry_count = 0,
	};

	struct vdo *vdo = repair->completion.vdo;
	struct recovery_journal *journal = vdo->recovery_journal;
	struct recovery_block_header header =
		get_recovery_journal_block_header(journal, repair->journal_data,
						  repair->tail);

	repair->logical_blocks_used = header.logical_blocks_used;
	repair->block_map_data_blocks = header.block_map_data_blocks;

	for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
	     increment_recovery_point(&recovery_point)) {
		struct recovery_journal_entry entry = get_entry(repair, &recovery_point);
		int result;

		result = validate_recovery_journal_entry(vdo, &entry);
		if (result != VDO_SUCCESS) {
			vdo_enter_read_only_mode(vdo, result);
			return result;
		}

		if (entry.operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
			repair->block_map_data_blocks++;
			continue;
		}

		if (vdo_is_mapped_location(&entry.mapping))
			repair->logical_blocks_used++;

		if (vdo_is_mapped_location(&entry.unmapping))
			repair->logical_blocks_used--;
	}

	return VDO_SUCCESS;
}
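/*
 * For example (illustrative): replaying a data entry that maps a previously unmapped LBN
 * raises logical_blocks_used by one; a remap (both mapping and unmapping mapped) nets zero;
 * an unmap lowers it by one.
 */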
static int parse_journal_for_recovery(struct repair_completion *repair)
{
	int result;
	sequence_number_t i, head;
	bool found_entries = false;
	struct recovery_journal *journal = repair->completion.vdo->recovery_journal;

	head = min(repair->block_map_head, repair->slab_journal_head);
	for (i = head; i <= repair->highest_tail; i++) {
		struct recovery_block_header header;
		journal_entry_count_t block_entries;
		u8 j;

		repair->tail = i;
		repair->tail_recovery_point = (struct recovery_point) {
			.sequence_number = i,
			.sector_count = 0,
			.entry_count = 0,
		};

		header = get_recovery_journal_block_header(journal, repair->journal_data, i);
		if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
			/* This is an old format block, so we need to upgrade. */
			vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
					       "Recovery journal is in the old format, a read-only rebuild is required.");
			vdo_enter_read_only_mode(repair->completion.vdo,
						 VDO_UNSUPPORTED_VERSION);
			return VDO_UNSUPPORTED_VERSION;
		}

		if (!is_exact_recovery_journal_block(journal, &header, i,
						     VDO_METADATA_RECOVERY_JOURNAL_2)) {
			/* A bad block header was found so this must be the end of the journal. */
			break;
		}

		block_entries = header.entry_count;

		/* Examine each sector in turn to determine the last valid sector. */
		for (j = 1; j < VDO_SECTORS_PER_BLOCK; j++) {
			struct packed_journal_sector *sector =
				get_sector(journal, repair->journal_data, i, j);
			journal_entry_count_t sector_entries =
				min_t(journal_entry_count_t, sector->entry_count,
				      block_entries);

			/* A bad sector means that this block was torn. */
			if (!vdo_is_valid_recovery_journal_sector(&header, sector, j))
				break;

			if (sector_entries > 0) {
				found_entries = true;
				repair->tail_recovery_point.sector_count++;
				repair->tail_recovery_point.entry_count = sector_entries;
				block_entries -= sector_entries;
				repair->entry_count += sector_entries;
			}

			/* If this sector is short, the later sectors can't matter. */
			if ((sector_entries < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR) ||
			    (block_entries == 0))
				break;
		}

		/* If this block was not filled, or if it tore, no later block can matter. */
		if ((header.entry_count != journal->entries_per_block) || (block_entries > 0))
			break;
	}

	if (!found_entries)
		return validate_heads(repair);

	/* Set the tail to the last valid tail block, if there is one. */
	if (repair->tail_recovery_point.sector_count == 0)
		repair->tail--;

	result = validate_heads(repair);
	if (result != VDO_SUCCESS)
		return result;

	vdo_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu",
		     (unsigned long long) repair->highest_tail,
		     (unsigned long long) repair->tail);

	result = extract_new_mappings(repair);
	if (result != VDO_SUCCESS)
		return result;

	return compute_usages(repair);
}
static int parse_journal(struct repair_completion *repair)
{
	if (!find_recovery_journal_head_and_tail(repair))
		return VDO_SUCCESS;

	return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ?
		parse_journal_for_rebuild(repair) :
		parse_journal_for_recovery(repair));
}
static void finish_journal_load(struct vdo_completion *completion)
{
	struct repair_completion *repair = completion->parent;

	if (++repair->vios_complete != repair->vio_count)
		return;

	vdo_log_info("Finished reading recovery journal");
	uninitialize_vios(repair);
	prepare_repair_completion(repair, recover_block_map, VDO_ZONE_TYPE_LOGICAL);
	vdo_continue_completion(&repair->completion, parse_journal(repair));
}
static void handle_journal_load_error(struct vdo_completion *completion)
{
	struct repair_completion *repair = completion->parent;

	/* Preserve the error. */
	vdo_set_completion_result(&repair->completion, completion->result);
	vio_record_metadata_io_error(as_vio(completion));
	completion->callback(completion);
}
static void read_journal_endio(struct bio *bio)
{
	struct vio *vio = bio->bi_private;
	struct vdo *vdo = vio->completion.vdo;

	continue_vio_after_io(vio, finish_journal_load, vdo->thread_config.admin_thread);
}
/**
 * vdo_repair() - Load the recovery journal and then recover or rebuild a vdo.
 * @parent: The completion to notify when the operation is complete.
 */
void vdo_repair(struct vdo_completion *parent)
{
	int result;
	char *ptr;
	struct repair_completion *repair;
	struct vdo *vdo = parent->vdo;
	struct recovery_journal *journal = vdo->recovery_journal;
	physical_block_number_t pbn = journal->origin;
	block_count_t remaining = journal->size;
	block_count_t vio_count = DIV_ROUND_UP(remaining, MAX_BLOCKS_PER_VIO);
	page_count_t page_count = min_t(page_count_t,
					vdo->device_config->cache_size >> 1,
					MAXIMUM_SIMULTANEOUS_VDO_BLOCK_MAP_RESTORATION_READS);
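	/*
	 * Sizing example (hypothetical numbers): if the journal were 1000 blocks and
	 * MAX_BLOCKS_PER_VIO were 256, reading it would take DIV_ROUND_UP(1000, 256) = 4 vios.
	 */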
	vdo_assert_on_admin_thread(vdo, __func__);

	if (vdo->load_state == VDO_FORCE_REBUILD) {
		vdo_log_warning("Rebuilding reference counts to clear read-only mode");
		vdo->states.vdo.read_only_recoveries++;
	} else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) {
		vdo_log_warning("Rebuilding reference counts for upgrade");
	} else {
		vdo_log_warning("Device was dirty, rebuilding reference counts");
	}

	result = vdo_allocate_extended(struct repair_completion, page_count,
				       struct vdo_page_completion, __func__,
				       &repair);
	if (result != VDO_SUCCESS) {
		vdo_fail_completion(parent, result);
		return;
	}

	vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION);
	repair->completion.error_handler = abort_repair;
	repair->completion.parent = parent;
	prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
	repair->page_count = page_count;

	result = vdo_allocate(remaining * VDO_BLOCK_SIZE, char, __func__,
			      &repair->journal_data);
	if (abort_on_error(result, repair))
		return;

	result = vdo_allocate(vio_count, struct vio, __func__, &repair->vios);
	if (abort_on_error(result, repair))
		return;

	ptr = repair->journal_data;
	for (repair->vio_count = 0; repair->vio_count < vio_count; repair->vio_count++) {
		block_count_t blocks = min_t(block_count_t, remaining,
					     MAX_BLOCKS_PER_VIO);

		result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
						 VIO_PRIORITY_METADATA,
						 repair, blocks, ptr,
						 &repair->vios[repair->vio_count]);
		if (abort_on_error(result, repair))
			return;

		ptr += (blocks * VDO_BLOCK_SIZE);
		remaining -= blocks;
	}

	for (vio_count = 0; vio_count < repair->vio_count;
	     vio_count++, pbn += MAX_BLOCKS_PER_VIO) {
		vdo_submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio,
					handle_journal_load_error, REQ_OP_READ);
	}
}