Git Repo - linux.git/commitdiff
Merge tag 'v3.8-rc5' into x86/mm
author	H. Peter Anvin <[email protected]>
Sat, 26 Jan 2013 00:31:21 +0000 (16:31 -0800)
committer	H. Peter Anvin <[email protected]>
Sat, 26 Jan 2013 00:31:21 +0000 (16:31 -0800)
The __pa() fixup series that follows touches KVM code that is not
present in the existing branch based on v3.7-rc5, so merge in the
current upstream from Linus.

Signed-off-by: H. Peter Anvin <[email protected]>
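
For background on the __pa() fixup series this merge prepares for: __pa() translates addresses in the kernel's direct map, while __pa_symbol() is intended for symbols inside the kernel image, which x86-64 maps at its own virtual base. The sketch below illustrates the distinction in userspace with stand-in base addresses; the real constants live in asm/page_64_types.h and the real macros differ in detail, so treat this as a model, not the kernel's implementation.

    #include <stdio.h>

    /* Illustrative values only -- stand-ins for the kernel's PAGE_OFFSET,
     * __START_KERNEL_map and phys_base, which vary by configuration. */
    #define DIRECT_MAP_BASE 0xffff880000000000UL /* base of the direct map */
    #define KERNEL_MAP_BASE 0xffffffff80000000UL /* base of the image mapping */

    static unsigned long phys_base = 0x1000000UL; /* image load address */

    /* Direct-map addresses translate by a constant offset. */
    static unsigned long pa(unsigned long vaddr)
    {
            return vaddr - DIRECT_MAP_BASE;
    }

    /* Kernel-image symbols translate against the image mapping instead;
     * handing them to pa() yields a bogus physical address. */
    static unsigned long pa_symbol(unsigned long vaddr)
    {
            return vaddr - KERNEL_MAP_BASE + phys_base;
    }

    int main(void)
    {
            unsigned long sym = KERNEL_MAP_BASE + 0x2000UL; /* fake _text+0x2000 */
            printf("pa_symbol(sym) = 0x%lx\n", pa_symbol(sym));
            printf("pa(sym)        = 0x%lx (wrong for image symbols)\n", pa(sym));
            return 0;
    }
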
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/setup.c
arch/x86/lguest/boot.c
arch/x86/mm/init_64.c
arch/x86/mm/pgtable.c

diff --combined arch/x86/kernel/acpi/sleep.c
index f146a3c1081471d615aadf9b445ea2ea71d4e940,d5e0d717005abd1f9b610ff1ce39beaefaa080f0..0532f5d6e4efc66c264d1e7d47d44e615b833be3
@@@ -69,7 -69,7 +69,7 @@@ int acpi_suspend_lowlevel(void
  
  #ifndef CONFIG_64BIT
        header->pmode_entry = (u32)&wakeup_pmode_return;
 -      header->pmode_cr3 = (u32)__pa(&initial_page_table);
 +      header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
        saved_magic = 0x12345678;
  #else /* CONFIG_64BIT */
  #ifdef CONFIG_SMP
@@@ -101,6 -101,8 +101,8 @@@ static int __init acpi_sleep_setup(cha
  #endif
                if (strncmp(str, "nonvs", 5) == 0)
                        acpi_nvs_nosave();
+               if (strncmp(str, "nonvs_s3", 8) == 0)
+                       acpi_nvs_nosave_s3();
                if (strncmp(str, "old_ordering", 12) == 0)
                        acpi_old_suspend_ordering();
                str = strchr(str, ',');
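
For reference, the parser above walks a comma-separated acpi_sleep= string, prefix-matching each token with strncmp() and skipping to the next token with strchr(). A minimal standalone sketch of that walking pattern follows, with printf() standing in for the ACPI calls; since the tests are prefix matches, the sketch checks the longer "nonvs_s3" before "nonvs" (in the hunk above, "nonvs_s3" satisfies both tests).

    #include <stdio.h>
    #include <string.h>

    /* Walk a comma-separated option string the way acpi_sleep_setup() does:
     * prefix-match each token, then advance past the next comma. */
    static void parse_acpi_sleep(char *str)
    {
            while ((str != NULL) && (*str != '\0')) {
                    if (strncmp(str, "nonvs_s3", 8) == 0)
                            printf("-> acpi_nvs_nosave_s3()\n");
                    else if (strncmp(str, "nonvs", 5) == 0)
                            printf("-> acpi_nvs_nosave()\n");
                    else if (strncmp(str, "old_ordering", 12) == 0)
                            printf("-> acpi_old_suspend_ordering()\n");
                    str = strchr(str, ',');
                    if (str != NULL)
                            str += strspn(str, ",");
            }
    }

    int main(void)
    {
            char opts[] = "nonvs_s3,old_ordering";
            parse_acpi_sleep(opts); /* prints both matched handlers */
            return 0;
    }
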
diff --combined arch/x86/kernel/apic/apic_numachip.c
index ae9196f31261799f149731666b2e78fa3d11f48f,9c2aa89a11cbf8d4a124d25bdea25d726cedba17..9a9110918ca719b3a0d2b6a7917899d2762a24bb
  #include <linux/hardirq.h>
  #include <linux/delay.h>
  
+ #include <asm/numachip/numachip.h>
  #include <asm/numachip/numachip_csr.h>
  #include <asm/smp.h>
  #include <asm/apic.h>
  #include <asm/ipi.h>
  #include <asm/apic_flat_64.h>
 +#include <asm/pgtable.h>
  
  static int numachip_system __read_mostly;
  
@@@ -180,6 -180,7 +181,7 @@@ static int __init numachip_system_init(
                return 0;
  
        x86_cpuinit.fixup_cpu_id = fixup_cpu_id;
+       x86_init.pci.arch_init = pci_numachip_init;
  
        map_csrs();
  
diff --combined arch/x86/kernel/cpu/intel.c
index 2249e7e44521a34a38851ea142a3092877a270f4,fcaabd0432c5dda0fa5e3c8f8b37473ee177c1d8..fdfefa27b94832fb672651d922c185ed4fa218d3
@@@ -168,7 -168,7 +168,7 @@@ int __cpuinit ppro_with_ram_bug(void
  #ifdef CONFIG_X86_F00F_BUG
  static void __cpuinit trap_init_f00f_bug(void)
  {
 -      __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
 +      __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
  
        /*
         * Update the IDT descriptor and reload the IDT so that
@@@ -612,10 -612,6 +612,6 @@@ static void __cpuinit intel_tlb_lookup(
  
  static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
  {
-       if (!cpu_has_invlpg) {
-               tlb_flushall_shift = -1;
-               return;
-       }
        switch ((c->x86 << 8) + c->x86_model) {
        case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
diff --combined arch/x86/kernel/setup.c
index 2702c5d4acd254f148261f66df80d97a6d130ac7,00f6c1472b850472e5f9759dd5ad9613f6c026be..8354399b3aae21082d94cb6478cf29ca93e4ee84
@@@ -143,11 -143,7 +143,7 @@@ int default_check_phys_apicid_present(i
  }
  #endif
  
- #ifndef CONFIG_DEBUG_BOOT_PARAMS
- struct boot_params __initdata boot_params;
- #else
  struct boot_params boot_params;
- #endif
  
  /*
   * Machine setup..
@@@ -300,8 -296,8 +296,8 @@@ static void __init cleanup_highmap(void
  static void __init reserve_brk(void)
  {
        if (_brk_end > _brk_start)
 -              memblock_reserve(__pa(_brk_start),
 -                               __pa(_brk_end) - __pa(_brk_start));
 +              memblock_reserve(__pa_symbol(_brk_start),
 +                               _brk_end - _brk_start);
  
        /* Mark brk area as locked down and no longer taking any
           new allocations */
@@@ -614,6 -610,83 +610,83 @@@ static __init void reserve_ibft_region(
  
  static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
  
+ static bool __init snb_gfx_workaround_needed(void)
+ {
+ #ifdef CONFIG_PCI
+       int i;
+       u16 vendor, devid;
+       static const __initconst u16 snb_ids[] = {
+               0x0102,
+               0x0112,
+               0x0122,
+               0x0106,
+               0x0116,
+               0x0126,
+               0x010a,
+       };
+       /* Assume no if something weird is going on with PCI */
+       if (!early_pci_allowed())
+               return false;
+       vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
+       if (vendor != 0x8086)
+               return false;
+       devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
+       for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
+               if (devid == snb_ids[i])
+                       return true;
+ #endif
+       return false;
+ }
+
+ /*
+  * Sandy Bridge graphics has trouble with certain ranges, exclude
+  * them from allocation.
+  */
+ static void __init trim_snb_memory(void)
+ {
+       static const __initconst unsigned long bad_pages[] = {
+               0x20050000,
+               0x20110000,
+               0x20130000,
+               0x20138000,
+               0x40004000,
+       };
+       int i;
+       if (!snb_gfx_workaround_needed())
+               return;
+       printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+       /*
+        * Reserve all memory below the 1 MB mark that has not
+        * already been reserved.
+        */
+       memblock_reserve(0, 1<<20);
+       
+       for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
+               if (memblock_reserve(bad_pages[i], PAGE_SIZE))
+                       printk(KERN_WARNING "failed to reserve 0x%08lx\n",
+                              bad_pages[i]);
+       }
+ }
+
+ /*
+  * Here we put platform-specific memory range workarounds, i.e.
+  * memory known to be corrupt or otherwise in need of being reserved on
+  * specific platforms.
+  *
+  * If this gets used more widely it could use a real dispatch mechanism.
+  */
+ static void __init trim_platform_memory_ranges(void)
+ {
+       trim_snb_memory();
+ }
 
  static void __init trim_bios_range(void)
  {
        /*
         * take them out.
         */
        e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
  }
  
@@@ -761,12 -835,12 +835,12 @@@ void __init setup_arch(char **cmdline_p
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = _brk_end;
  
 -      code_resource.start = virt_to_phys(_text);
 -      code_resource.end = virt_to_phys(_etext)-1;
 -      data_resource.start = virt_to_phys(_etext);
 -      data_resource.end = virt_to_phys(_edata)-1;
 -      bss_resource.start = virt_to_phys(&__bss_start);
 -      bss_resource.end = virt_to_phys(&__bss_stop)-1;
 +      code_resource.start = __pa_symbol(_text);
 +      code_resource.end = __pa_symbol(_etext)-1;
 +      data_resource.start = __pa_symbol(_etext);
 +      data_resource.end = __pa_symbol(_edata)-1;
 +      bss_resource.start = __pa_symbol(__bss_start);
 +      bss_resource.end = __pa_symbol(__bss_stop)-1;
  
  #ifdef CONFIG_CMDLINE_BOOL
  #ifdef CONFIG_CMDLINE_OVERRIDE
  
        setup_real_mode();
  
+       trim_platform_memory_ranges();
        init_gbpages();
  
        /* max_pfn_mapped is updated here */
  
        reserve_initrd();
  
+ #if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
+       acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
+ #endif
        reserve_crashkernel();
  
        vsmp_init();
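
One detail worth noting in the reserve_brk() hunk earlier in this file: the size argument changed from __pa(_brk_end) - __pa(_brk_start) to plain _brk_end - _brk_start. Because these translations are linear (a constant offset), a difference of virtual addresses equals the difference of the corresponding physical addresses. A one-assert sketch under that stand-in assumption:

    #include <assert.h>

    #define MAP_BASE 0xffff880000000000UL          /* stand-in linear-map base */
    #define PA(v)    ((unsigned long)(v) - MAP_BASE)

    int main(void)
    {
            unsigned long brk_start = MAP_BASE + 0x100000UL;
            unsigned long brk_end   = MAP_BASE + 0x108000UL;

            /* Linearity: subtracting the same base cancels out. */
            assert(PA(brk_end) - PA(brk_start) == brk_end - brk_start);
            return 0;
    }
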
diff --combined arch/x86/lguest/boot.c
index 139dd353c2f21282cdb07c9f875823669c54013a,df4176cdbb321e8223dfdffb056b35981c3c63f8..1cbd89ca5569f2ce5d76a4bed1f21d6d52ed63e5
@@@ -552,8 -552,7 +552,8 @@@ static void lguest_write_cr3(unsigned l
        current_cr3 = cr3;
  
        /* These two page tables are simple, linear, and used during boot */
 -      if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
 +      if (cr3 != __pa_symbol(swapper_pg_dir) &&
 +          cr3 != __pa_symbol(initial_page_table))
                cr3_changed = true;
  }
  
@@@ -1413,7 -1412,7 +1413,7 @@@ __init void lguest_init(void
  
        /* We don't have features.  We have puppies!  Puppies! */
  #ifdef CONFIG_X86_MCE
-       mce_disabled = 1;
+       mca_cfg.disabled = true;
  #endif
  #ifdef CONFIG_ACPI
        acpi_disabled = 1;
diff --combined arch/x86/mm/init_64.c
index 0374a10f4fb7757d8194b082cb988049dc5f9557,2ead3c8a4c8419da92a61fbba35eb695e5205dde..287c6d6a9ef1ff1ba140801f875443e092c8c550
@@@ -630,7 -630,9 +630,9 @@@ void __init paging_init(void
         *       numa support is not compiled in, and later node_set_state
         *       will not set it back.
         */
-       node_clear_state(0, N_NORMAL_MEMORY);
+       node_clear_state(0, N_MEMORY);
+       if (N_MEMORY != N_NORMAL_MEMORY)
+               node_clear_state(0, N_NORMAL_MEMORY);
  
        zone_sizes_init();
  }
@@@ -770,10 -772,12 +772,10 @@@ void set_kernel_text_ro(void
  void mark_rodata_ro(void)
  {
        unsigned long start = PFN_ALIGN(_text);
 -      unsigned long rodata_start =
 -              ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
 +      unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
 -      unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
 -      unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
 -      unsigned long data_start = (unsigned long) &_sdata;
 +      unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
 +      unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
  
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
  #endif
  
        free_init_pages("unused kernel memory",
 -                      (unsigned long) page_address(virt_to_page(text_end)),
 -                      (unsigned long)
 -                               page_address(virt_to_page(rodata_start)));
 +                      (unsigned long) __va(__pa_symbol(text_end)),
 +                      (unsigned long) __va(__pa_symbol(rodata_start)));
 +
        free_init_pages("unused kernel memory",
 -                      (unsigned long) page_address(virt_to_page(rodata_end)),
 -                      (unsigned long) page_address(virt_to_page(data_start)));
 +                      (unsigned long) __va(__pa_symbol(rodata_end)),
 +                      (unsigned long) __va(__pa_symbol(_sdata)));
  }
  
  #endif
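
The mark_rodata_ro() hunk above also swaps an open-coded page round-up for PFN_ALIGN(). A quick sketch of the equivalence, assuming the usual 4 KiB page constants:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    /* Round up to the next page boundary, as the kernel's PFN_ALIGN() does. */
    #define PFN_ALIGN(x) (((unsigned long)(x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long addr = 0xffff880000051234UL;
            unsigned long open_coded = (addr + PAGE_SIZE - 1) & PAGE_MASK;

            assert(PFN_ALIGN(addr) == open_coded);
            printf("0x%lx rounds up to 0x%lx\n", addr, PFN_ALIGN(addr));
            return 0;
    }
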
diff --combined arch/x86/mm/pgtable.c
index 8a828d773e5866f770f6f1d7b74134a7c6c84dc4,e27fbf887f3ba2b0fb41595710a9d8d9882aed1f..193350b51f90af74508a8ec97217085bf3acdbab
@@@ -137,7 -137,7 +137,7 @@@ static void pgd_dtor(pgd_t *pgd
   * against pageattr.c; it is the unique case in which a valid change
   * of kernel pagetables can't be lazily synchronized by vmalloc faults.
   * vmalloc faults work because attached pagetables are never freed.
-  * -- wli
+  * -- nyc
   */
  
  #ifdef CONFIG_X86_PAE
@@@ -301,6 -301,13 +301,13 @@@ void pgd_free(struct mm_struct *mm, pgd
        free_page((unsigned long)pgd);
  }
  
+ /*
+  * Used to set accessed or dirty bits in the page table entries
+  * on other architectures. On x86, the accessed and dirty bits
+  * are tracked by hardware. However, do_wp_page calls this function
+  * to also make the pte writeable at the same time the dirty bit is
+  * set. In that case we do actually need to write the PTE.
+  */
  int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
-               flush_tlb_page(vma, address);
        }
  
        return changed;
@@@ -328,12 -334,7 +334,12 @@@ int pmdp_set_access_flags(struct vm_are
        if (changed && dirty) {
                *pmdp = entry;
                pmd_update_defer(vma->vm_mm, address, pmdp);
 -              flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 +              /*
 +               * We had a write-protection fault here and changed the pmd
 +               * to be more permissive. No need to flush the TLB for that,
 +               * #PF is architecturally guaranteed to do that and in the
 +               * worst-case we'll generate a spurious fault.
 +               */
        }
  
        return changed;