x86/mm/pat: Don't implicitly allow _PAGE_RW in kernel_map_pages_in_pgd()
author Sai Praneeth <[email protected]>
Wed, 17 Feb 2016 12:36:04 +0000 (12:36 +0000)
committer Ingo Molnar <[email protected]>
Mon, 22 Feb 2016 07:26:28 +0000 (08:26 +0100)
As part of the preparation for the EFI_MEMORY_RO flag added in the UEFI
2.5 specification, we need the ability to map pages in kernel page
tables without _PAGE_RW being set.

Modify kernel_map_pages_in_pgd() to require its callers to pass _PAGE_RW
if the pages need to be mapped read/write. Otherwise, we'll map the
pages as read-only.
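
For example, under the new semantics a call site that needs a writable, non-executable
mapping must pass _PAGE_RW explicitly; omitting it yields a read-only mapping. A minimal
sketch of the two cases (pgd, pfn, vaddr and num_pages are placeholder variables for
illustration, not taken from this patch):

	/* Writable, non-executable mapping: _PAGE_RW must now be passed. */
	ret = kernel_map_pages_in_pgd(pgd, pfn, vaddr, num_pages,
				      _PAGE_NX | _PAGE_RW);

	/* Same range mapped read-only: simply omit _PAGE_RW. */
	ret = kernel_map_pages_in_pgd(pgd, pfn, vaddr, num_pages, _PAGE_NX);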

Signed-off-by: Sai Praneeth Prakhya <[email protected]>
Signed-off-by: Matt Fleming <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Lee, Chun-Yi <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Luis R. Rodriguez <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ravi Shankar <[email protected]>
Cc: Ricardo Neri <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Toshi Kani <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi_64.c

index bf312da41a6d54d2825c935fbb9c9552ac279c37..14c38ae804094461e510d0b4e874325335def03c 100644
@@ -1971,6 +1971,9 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
        if (!(page_flags & _PAGE_NX))
                cpa.mask_clr = __pgprot(_PAGE_NX);
 
+       if (!(page_flags & _PAGE_RW))
+               cpa.mask_clr = __pgprot(_PAGE_RW);
+
        cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
 
        retval = __change_page_attr_set_clr(&cpa, 0);
index b492521503fe3db968bdfc0f7ced68039dea98e5..b0965b27e47ff561159292fc7af5bc34d6e30c88 100644
@@ -233,7 +233,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * phys_efi_set_virtual_address_map().
         */
        pfn = pa_memmap >> PAGE_SHIFT;
-       if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
+       if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }
@@ -262,7 +262,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
                pfn = md->phys_addr >> PAGE_SHIFT;
                npages = md->num_pages;
 
-               if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, 0)) {
+               if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, _PAGE_RW)) {
                        pr_err("Failed to map 1:1 memory\n");
                        return 1;
                }
@@ -279,7 +279,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;
 
-       if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
+       if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }
@@ -294,7 +294,7 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
-       unsigned long flags = 0;
+       unsigned long flags = _PAGE_RW;
        unsigned long pfn;
        pgd_t *pgd = efi_pgd;
 