alloc_tag: fix module allocation tags populated area calculation
author	Suren Baghdasaryan <[email protected]>
	Sat, 30 Nov 2024 00:14:22 +0000 (16:14 -0800)
committer	Andrew Morton <[email protected]>
	Thu, 19 Dec 2024 03:04:46 +0000 (19:04 -0800)
vm_module_tags_populate()'s calculation of the populated area assumes
that the area starts at a page boundary and therefore, when new pages
are allocated, the end of the area is page-aligned as well.  If the
start of the area is not page-aligned, then allocating a page and
incrementing the end of the area by PAGE_SIZE leaves a range at the
end, still within the area boundary, that is not populated.  Accessing
this range will lead to a kernel panic.  Fix the calculation by
down-aligning the start of the area and using that as the location
that allocated pages are mapped to.
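To illustrate the gap, a minimal standalone sketch (not kernel code;
the start address and page count are hypothetical, and ALIGN_DOWN is
redefined locally so the example is self-contained):

/* Standalone illustration of the miscalculated populated end. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* Hypothetical values: an unaligned area start and 4 populated pages. */
	unsigned long start_addr = 0xffffc90000100800UL;
	unsigned long nr_pages = 4;
	unsigned long phys_size = nr_pages * PAGE_SIZE;

	/* Old calculation: populated end assumed to be start + phys_size. */
	unsigned long assumed_end = start_addr + phys_size;

	/*
	 * Pages are mapped starting from the page-aligned base, so the real
	 * populated end is lower whenever start_addr is not page-aligned.
	 */
	unsigned long real_end = ALIGN_DOWN(start_addr, PAGE_SIZE) + phys_size;

	printf("assumed end %#lx, real end %#lx, unpopulated gap %lu bytes\n",
	       assumed_end, real_end, assumed_end - real_end);	/* gap is 0x800 here */
	return 0;
}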

[[email protected]: fix vm_module_tags_populate's KASAN poisoning logic]
Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix panic when CONFIG_KASAN enabled and CONFIG_KASAN_VMALLOC not enabled]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Fixes: 0f9b685626da ("alloc_tag: populate memory for module tags as needed")
Signed-off-by: Suren Baghdasaryan <[email protected]>
Reported-by: kernel test robot <[email protected]>
Closes: https://lore.kernel.org/oe-lkp/[email protected]
Acked-by: Yu Zhao <[email protected]>
Tested-by: Adrian Huang <[email protected]>
Cc: David Wang <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: Mike Rapoport (Microsoft) <[email protected]>
Cc: Pasha Tatashin <[email protected]>
Cc: Sourav Panda <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
lib/alloc_tag.c

index 3a0413462e9fc2d02441608026810d715073903d..7dcebf118a3e64ac50a0ab9071623daca9a8aee8 100644
@@ -408,28 +408,52 @@ repeat:
 
 static int vm_module_tags_populate(void)
 {
-       unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
+       unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
+                                (vm_module_tags->nr_pages << PAGE_SHIFT);
+       unsigned long new_end = module_tags.start_addr + module_tags.size;
 
-       if (phys_size < module_tags.size) {
+       if (phys_end < new_end) {
                struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
-               unsigned long addr = module_tags.start_addr + phys_size;
+               unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
+               unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
                unsigned long more_pages;
                unsigned long nr;
 
-               more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
+               more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
                nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
                                                 NUMA_NO_NODE, more_pages, next_page);
                if (nr < more_pages ||
-                   vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
+                   vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
                                     next_page, PAGE_SHIFT) < 0) {
                        /* Clean up and error out */
                        for (int i = 0; i < nr; i++)
                                __free_page(next_page[i]);
                        return -ENOMEM;
                }
+
                vm_module_tags->nr_pages += nr;
+
+               /*
+                * Kasan allocates 1 byte of shadow for every 8 bytes of data.
+                * When kasan_alloc_module_shadow allocates shadow memory,
+                * its unit of allocation is a page.
+                * Therefore, here we need to align to MODULE_ALIGN.
+                */
+               if (old_shadow_end < new_shadow_end)
+                       kasan_alloc_module_shadow((void *)old_shadow_end,
+                                                 new_shadow_end - old_shadow_end,
+                                                 GFP_KERNEL);
        }
 
+       /*
+        * Mark the pages as accessible, now that they are mapped.
+        * With hardware tag-based KASAN, marking is skipped for
+        * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+        */
+       kasan_unpoison_vmalloc((void *)module_tags.start_addr,
+                               new_end - module_tags.start_addr,
+                               KASAN_VMALLOC_PROT_NORMAL);
+
        return 0;
 }
 
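For reference, a minimal sketch of the shadow-range arithmetic the
KASAN comment above describes (assumes generic KASAN with 4 KiB pages
and the 8:1 shadow scale; the helper name is made up for illustration):

/* Illustration only: why both ends are rounded up to MODULE_ALIGN. */
#define PAGE_SIZE		4096UL
#define KASAN_SHADOW_SCALE	8UL	/* 1 shadow byte covers 8 data bytes */
#define MODULE_ALIGN		(PAGE_SIZE * KASAN_SHADOW_SCALE)	/* data covered by one shadow page */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/*
 * Shadow memory is allocated one page at a time, and a single shadow page
 * covers MODULE_ALIGN bytes of data.  New shadow is therefore needed only
 * when the populated end crosses into the next MODULE_ALIGN-sized chunk.
 */
static inline int needs_more_shadow(unsigned long old_end, unsigned long new_end)
{
	return ALIGN(old_end, MODULE_ALIGN) < ALIGN(new_end, MODULE_ALIGN);
}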