no delay (0).
Format: integer
+ bootmem_debug [KNL] Enable bootmem allocator debug messages.
+
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
bttv.radio= Most important insmod options are available as
kernel args too.
* [no]ncq: Turn on or off NCQ.
+ * nohrst, nosrst, norst: suppress hard reset, soft
+ reset, or both, respectively.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
+ nox2apic [X86-64,APIC] Do not enable x2APIC mode.
+
+ x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
+ default x2apic cluster mode on platforms
+ supporting x2apic.
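+ For example, booting with "x2apic_phys" on the
+ kernel command line selects physical destination mode.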
+
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
+
config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP
+ depends on X86_64 && SMP && BROKEN
default n
help
Configure the maximum number of CPUs and NUMA nodes for this architecture.
If unsure, say N.
- if MAXSMP
- config NR_CPUS
- int
- default "4096"
- endif
-
- if !MAXSMP
config NR_CPUS
- int "Maximum number of CPUs (2-4096)"
- range 2 4096
+ int "Maximum number of CPUs (2-512)" if !MAXSMP
+ range 2 512
depends on SMP
+ default "4096" if MAXSMP
default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
default "8"
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 4096 and the
+ kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
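As a rough worked example: the MAXSMP default of 4096 CPUs adds
about 4096 * 8 KB = 32 MB to the kernel image, while the default
of 8 adds only about 64 KB.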
- endif
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
local memory controller of the CPU and add some more
NUMA awareness to the kernel.
- For i386 this is currently highly experimental and should be only
+ For 32-bit this is currently highly experimental and should only be
used for kernel development. It might also cause boot failures.
- For x86_64 this is recommended on all multiprocessor Opteron systems.
+ For 64-bit this is recommended on all multiprocessor Opteron systems.
If the system is EM64T, you should say N unless your system is
EM64T NUMA.
into virtual nodes when booted with "numa=fake=N", where N is the
number of nodes. This is only useful for debugging.
- if MAXSMP
-
config NODES_SHIFT
- int
- default "9"
- endif
-
- if !MAXSMP
- config NODES_SHIFT
- int "Maximum NUMA Nodes (as a power of 2)"
+ int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
range 1 9 if X86_64
+ default "9" if MAXSMP
default "6" if X86_64
default "4" if X86_NUMAQ
default "3"
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
- endif
config HAVE_ARCH_BOOTMEM_NODE
def_bool y
strongly in flux, so no good recommendation can be made.
config CRASH_DUMP
- bool "kernel crash dumps (EXPERIMENTAL)"
+ bool "kernel crash dumps"
depends on X86_64 || (X86_32 && HIGHMEM)
help
Generate crash dump after being started by kexec.
workaround will setup a 1:1 mapping for the first
16M to make floppy (an ISA device) work.
+config INTR_REMAP
+ bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+ depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+ help
+ Supports Interrupt remapping for IO-APIC and MSI devices.
+ To use x2apic mode in CPUs which support x2APIC enhancements or
+ to support platforms with CPUs having > 8-bit APIC IDs, say Y.
+
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
#ifdef _SETUP
# include "boot.h"
- # include "bitops.h"
#endif
#include <linux/types.h>
- #include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
- struct cpu_features {
- int level; /* Family, or 64 for x86-64 */
- int model;
- u32 flags[NCAPINTS];
- };
-
- static struct cpu_features cpu;
+ struct cpu_features cpu;
static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];
{
REQUIRED_MASK0,
REQUIRED_MASK1,
- REQUIRED_MASK2,
- REQUIRED_MASK3,
+ 0, /* REQUIRED_MASK2 not implemented in this file */
+ 0, /* REQUIRED_MASK3 not implemented in this file */
REQUIRED_MASK4,
- REQUIRED_MASK5,
+ 0, /* REQUIRED_MASK5 not implemented in this file */
REQUIRED_MASK6,
- REQUIRED_MASK7,
+ 0, /* REQUIRED_MASK7 not implemented in this file */
};
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
+ static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;
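+ /*
+  * Some platforms (SGI, per the check below) legitimately place the
+  * MMCONFIG aperture above 4GB; for them the low-4GB sanity check in
+  * acpi_parse_mcfg() is skipped.
+  */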
+ static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
+ {
+ if (!strcmp(mcfg->header.oem_id, "SGI"))
+ acpi_mcfg_64bit_base_addr = TRUE;
+
+ return 0;
+ }
+
int __init acpi_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
}
memcpy(pci_mmcfg_config, &mcfg[1], config_size);
+
+ acpi_mcfg_oem_check(mcfg);
+
for (i = 0; i < pci_mmcfg_config_num; ++i) {
- if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
+ if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
+ !acpi_mcfg_64bit_base_addr) {
printk(KERN_ERR PREFIX
"MMCONFIG not in low 4GB of memory\n");
kfree(pci_mmcfg_config);
set_fixmap_nocache(FIX_APIC_BASE, address);
if (boot_cpu_physical_apicid == -1U) {
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
#ifdef CONFIG_X86_32
apic_version[boot_cpu_physical_apicid] =
GET_APIC_VERSION(apic_read(APIC_LVR));
acpi_ioapic = 1;
smp_found_config = 1;
+#ifdef CONFIG_X86_32
setup_apic_routing();
+#endif
}
}
if (error == -EINVAL) {
return lapic_get_version() >= 0x14;
}
-void apic_wait_icr_idle(void)
+/*
+ * Paravirt kernels might also be using the ops below, so we still
+ * use the generic apic_read()/apic_write(), which might point to
+ * different ops in the PARAVIRT case.
+ */
+void xapic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
-u32 safe_apic_wait_icr_idle(void)
+u32 safe_xapic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
return send_status;
}
+void xapic_icr_write(u32 low, u32 id)
+{
+ apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
+ apic_write(APIC_ICR, low);
+}
+
+u64 xapic_icr_read(void)
+{
+ u32 icr1, icr2;
+
+ icr2 = apic_read(APIC_ICR2);
+ icr1 = apic_read(APIC_ICR);
+
+ return icr1 | ((u64)icr2 << 32);
+}
+
+static struct apic_ops xapic_ops = {
+ .read = native_apic_mem_read,
+ .write = native_apic_mem_write,
+ .icr_read = xapic_icr_read,
+ .icr_write = xapic_icr_write,
+ .wait_icr_idle = xapic_wait_icr_idle,
+ .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
+};
+
+struct apic_ops __read_mostly *apic_ops = &xapic_ops;
+EXPORT_SYMBOL_GPL(apic_ops);
+
/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
* default configuration (or the MP table is broken).
*/
if (boot_cpu_physical_apicid == -1U)
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
}
* might be zero if read from MP tables. Get it from LAPIC.
*/
#ifdef CONFIG_CRASH_DUMP
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
#endif
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
}
}
- unsigned int __cpuinitdata maxcpus = NR_CPUS;
-
void __cpuinit generic_processor_info(int apicid, int version)
{
int cpu;
return;
}
- if (num_processors >= maxcpus) {
- printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
- " Processor ignored.\n", maxcpus);
- return;
- }
-
num_processors++;
cpus_complement(tmp_map, cpu_present_map);
cpu = first_cpu(tmp_map);
#include <linux/clockchips.h>
#include <linux/acpi_pmtmr.h>
#include <linux/module.h>
+#include <linux/dmar.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/proto.h>
#include <asm/timex.h>
#include <asm/apic.h>
+#include <asm/i8259.h>
#include <mach_ipi.h>
#include <mach_apic.h>
static int disable_apic_timer __cpuinitdata;
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
+int disable_x2apic;
+int x2apic;
+
+/* x2apic enabled before OS handover */
+int x2apic_preenabled;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
unsigned long mp_lapic_addr;
- unsigned int __cpuinitdata maxcpus = NR_CPUS;
/*
* Get the LAPIC version
*/
return lapic_get_version() >= 0x14;
}
-void apic_wait_icr_idle(void)
+void xapic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
-u32 safe_apic_wait_icr_idle(void)
+u32 safe_xapic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
return send_status;
}
+void xapic_icr_write(u32 low, u32 id)
+{
+ apic_write(APIC_ICR2, id << 24);
+ apic_write(APIC_ICR, low);
+}
+
+u64 xapic_icr_read(void)
+{
+ u32 icr1, icr2;
+
+ icr2 = apic_read(APIC_ICR2);
+ icr1 = apic_read(APIC_ICR);
+
+ return (icr1 | ((u64)icr2 << 32));
+}
+
+static struct apic_ops xapic_ops = {
+ .read = native_apic_mem_read,
+ .write = native_apic_mem_write,
+ .icr_read = xapic_icr_read,
+ .icr_write = xapic_icr_write,
+ .wait_icr_idle = xapic_wait_icr_idle,
+ .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
+};
+
+struct apic_ops __read_mostly *apic_ops = &xapic_ops;
+
+EXPORT_SYMBOL_GPL(apic_ops);
+
+static void x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return;
+}
+
+static u32 safe_x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return 0;
+}
+
+void x2apic_icr_write(u32 low, u32 id)
+{
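+ /*
+ * In x2apic mode the ICR is a single 64-bit MSR: the destination
+ * id lives in the high 32 bits and the command in the low 32; the
+ * MSR index is APIC_BASE_MSR plus the xAPIC MMIO offset >> 4.
+ */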
+ wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+u64 x2apic_icr_read(void)
+{
+ unsigned long val;
+
+ rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+ return val;
+}
+
+static struct apic_ops x2apic_ops = {
+ .read = native_apic_msr_read,
+ .write = native_apic_msr_write,
+ .icr_read = x2apic_icr_read,
+ .icr_write = x2apic_icr_write,
+ .wait_icr_idle = x2apic_wait_icr_idle,
+ .safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
+};
+
/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
/*
* The ID register is read/write in a real APIC.
*/
- reg0 = read_apic_id();
+ reg0 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
- reg1 = read_apic_id();
+ reg1 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
apic_write(APIC_ID, reg0);
if (reg1 != (reg0 ^ APIC_ID_MASK))
apic_pm_activate();
}
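+/*
+ * X2APIC_ENABLE is the x2apic enable bit in the IA32_APIC_BASE MSR;
+ * firmware may hand the CPU over with it already set.
+ */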
+void check_x2apic(void)
+{
+ int msr, msr2;
+
+ rdmsr(MSR_IA32_APICBASE, msr, msr2);
+
+ if (msr & X2APIC_ENABLE) {
+ printk("x2apic enabled by BIOS, switching to x2apic ops\n");
+ x2apic_preenabled = x2apic = 1;
+ apic_ops = &x2apic_ops;
+ }
+}
+
+void enable_x2apic(void)
+{
+ int msr, msr2;
+
+ rdmsr(MSR_IA32_APICBASE, msr, msr2);
+ if (!(msr & X2APIC_ENABLE)) {
+ printk("Enabling x2apic\n");
+ wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+ }
+}
+
+void enable_IR_x2apic(void)
+{
+#ifdef CONFIG_INTR_REMAP
+ int ret;
+ unsigned long flags;
+
+ if (!cpu_has_x2apic)
+ return;
+
+ if (!x2apic_preenabled && disable_x2apic) {
+ printk(KERN_INFO
+ "Skipped enabling x2apic and Interrupt-remapping "
+ "because of nox2apic\n");
+ return;
+ }
+
+ if (x2apic_preenabled && disable_x2apic)
+ panic("Bios already enabled x2apic, can't enforce nox2apic");
+
+ if (!x2apic_preenabled && skip_ioapic_setup) {
+ printk(KERN_INFO
+ "Skipped enabling x2apic and Interrupt-remapping "
+ "because of skipping io-apic setup\n");
+ return;
+ }
+
+ ret = dmar_table_init();
+ if (ret) {
+ printk(KERN_INFO
+ "dmar_table_init() failed with %d:\n", ret);
+
+ if (x2apic_preenabled)
+ panic("x2apic enabled by bios. But IR enabling failed");
+ else
+ printk(KERN_INFO
+ "Not enabling x2apic,Intr-remapping\n");
+ return;
+ }
+
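+ /*
+ * Quiesce the legacy PIC and save the IO-APIC setup while the
+ * remapping hardware is switched on; the saved setup is restored
+ * or rewritten below depending on the outcome.
+ */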
+ local_irq_save(flags);
+ mask_8259A();
+ save_mask_IO_APIC_setup();
+
+ ret = enable_intr_remapping(1);
+
+ if (ret && x2apic_preenabled) {
+ local_irq_restore(flags);
+ panic("x2apic enabled by bios. But IR enabling failed");
+ }
+
+ if (ret)
+ goto end;
+
+ if (!x2apic) {
+ x2apic = 1;
+ apic_ops = &x2apic_ops;
+ enable_x2apic();
+ }
+end:
+ if (ret)
+ /*
+ * IR enabling failed
+ */
+ restore_IO_APIC_setup();
+ else
+ reinit_intr_remapped_IO_APIC(x2apic_preenabled);
+
+ unmask_8259A();
+ local_irq_restore(flags);
+
+ if (!ret) {
+ if (!x2apic_preenabled)
+ printk(KERN_INFO
+ "Enabled x2apic and interrupt-remapping\n");
+ else
+ printk(KERN_INFO
+ "Enabled Interrupt-remapping\n");
+ } else
+ printk(KERN_ERR
+ "Failed to enable Interrupt-remapping and x2apic\n");
+#else
+ if (!cpu_has_x2apic)
+ return;
+
+ if (x2apic_preenabled)
+ panic("x2apic enabled prior OS handover,"
+ " enable CONFIG_INTR_REMAP");
+
+ printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+ " and x2apic\n");
+#endif
+
+ return;
+}
+
/*
* Detect and enable local APICs on non-SMP boards.
* Original code written by Keir Fraser.
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
}
/**
*/
void __init init_apic_mappings(void)
{
+ if (x2apic) {
+ boot_cpu_physical_apicid = read_apic_id();
+ return;
+ }
+
/*
* If no local APIC can be found then set up a fake all
* zeroes page to simulate the local APIC and another
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
- boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
+ boot_cpu_physical_apicid = read_apic_id();
}
/*
return -1;
}
+ enable_IR_x2apic();
+ setup_apic_routing();
+
verify_local_APIC();
connect_bsp_APIC();
return;
}
- if (num_processors >= maxcpus) {
- printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
- " Processor ignored.\n", maxcpus);
- return;
- }
-
num_processors++;
cpus_complement(tmp_map, cpu_present_map);
cpu = first_cpu(tmp_map);
cpu_set(cpu, cpu_present_map);
}
+int hard_smp_processor_id(void)
+{
+ return read_apic_id();
+}
+
/*
* Power management
*/
maxlvt = lapic_get_maxlvt();
- apic_pm_state.apic_id = read_apic_id();
+ apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
maxlvt = lapic_get_maxlvt();
local_irq_save(flags);
- rdmsr(MSR_IA32_APICBASE, l, h);
- l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
- wrmsr(MSR_IA32_APICBASE, l, h);
+ if (!x2apic) {
+ rdmsr(MSR_IA32_APICBASE, l, h);
+ l &= ~MSR_IA32_APICBASE_BASE;
+ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+ wrmsr(MSR_IA32_APICBASE, l, h);
+ } else
+ enable_x2apic();
+
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
return (clusters > 2);
}
+static __init int setup_nox2apic(char *str)
+{
+ disable_x2apic = 1;
+ clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
+ return 0;
+}
+early_param("nox2apic", setup_nox2apic);
+
/*
* APIC command line parameters
*/
#include <asm/pat.h>
#include <asm/processor.h>
+#include <mach_apic.h>
+
struct cpuid_bit {
u16 feature;
u8 reg;
}
}
+/* leaf 0xb SMT level */
+#define SMT_LEVEL 0
+
+/* leaf 0xb sub-leaf types */
+#define INVALID_TYPE 0
+#define SMT_TYPE 1
+#define CORE_TYPE 2
+
+#define LEAFB_SUBTYPE(ecx) (((ecx) >> 8) & 0xff)
+#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
+#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
+
+/*
+ * Check for extended topology enumeration cpuid leaf 0xb and if it
+ * exists, use it for populating initial_apicid and cpu topology
+ * detection.
+ */
+void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+
+ if (c->cpuid_level < 0xb)
+ return;
+
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+
+ /*
+ * check if the cpuid leaf 0xb is actually implemented.
+ */
+ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
+ return;
+
+ set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
+
+ /*
+ * initial apic id, which also represents 32-bit extended x2apic id.
+ */
+ c->initial_apicid = edx;
+
+ /*
+ * Populate HT related information from sub-leaf level 0.
+ */
+ core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+
+ sub_index = 1;
+ do {
+ cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
+
+ /*
+ * Check for the Core type in the implemented sub leaves.
+ */
+ if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
+ core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+ break;
+ }
+
+ sub_index++;
+ } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+
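+ /*
+ * Mask of the APIC-id bits between the SMT level and the core
+ * level: once the id is shifted right by ht_mask_width, these
+ * bits select the core within a physical package.
+ */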
+ core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+
+#ifdef CONFIG_X86_32
+ c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+ & core_select_mask;
+ c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+#else
+ c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
+ c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
+#endif
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
+ if (c->x86_max_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
+ return;
+#endif
+}
+
#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
- if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+ /*
+ * There is a known erratum on Pentium III and Core Solo
+ * and Core Duo CPUs.
+ * " Page with PAT set to WC while associated MTRR is UC
+ * may consolidate to UC "
+ * Because of this erratum, it is better to stick with
+ * setting WC in MTRR rather than using PAT on these CPUs.
+ *
+ * Enable PAT WC only on P4, Core 2 or later CPUs.
+ */
+ if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
return;
- break;
+
+ pat_disable("PAT WC disabled due to known CPU erratum.");
+ return;
+
case X86_VENDOR_AMD:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_TRANSMETA:
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */
- setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+ setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+ setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+ setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
}
- static void __cpuinit set_cx86_inc(void)
- {
- unsigned char ccr3;
-
- printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
- ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- /* PCR1 -- Performance Control */
- /* Incrementor on, whatever that is */
- setCx86_old(CX86_PCR1, getCx86_old(CX86_PCR1) | 0x02);
- /* PCR0 -- Performance Control */
- /* Incrementor Margin 10 */
- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) | 0x04);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
- }
-
/*
* Configure later MediaGX and/or Geode processor.
*/
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */
- setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+ setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb();
set_cx86_reorder();
- set_cx86_inc();
local_irq_restore(flags);
}
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
- setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+ setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
- setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+ setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
} else {
c->coma_bug = 1; /* 6x86MX, it has the bug. */
}
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */
+ setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
local_irq_restore(flags);
}
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
-#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
+#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
+DEFINE_PER_CPU(int, x2apic_extra_bits);
+
+static enum uv_system_type uv_system_type;
+
+static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ if (!strcmp(oem_id, "SGI")) {
+ if (!strcmp(oem_table_id, "UVL"))
+ uv_system_type = UV_LEGACY_APIC;
+ else if (!strcmp(oem_table_id, "UVX"))
+ uv_system_type = UV_X2APIC;
+ else if (!strcmp(oem_table_id, "UVH")) {
+ uv_system_type = UV_NON_UNIQUE_APIC;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+enum uv_system_type get_uv_system_type(void)
+{
+ return uv_system_type;
+}
+
+int is_uv_system(void)
+{
+ return uv_system_type != UV_NONE;
+}
+EXPORT_SYMBOL_GPL(is_uv_system);
+
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
return 1;
}
+static void uv_init_apic_ldr(void)
+{
+}
+
static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
return BAD_APICID;
}
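+/*
+ * The UV hub does not put the node's high id bits into the hardware
+ * APIC id; they are kept per-cpu in x2apic_extra_bits (set from the
+ * pnode in set_x2apic_extra_bits()) and OR-ed back in here.
+ */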
+static unsigned int get_apic_id(unsigned long x)
+{
+ unsigned int id;
+
+ WARN_ON(preemptible() && num_online_cpus() > 1);
+ id = x | __get_cpu_var(x2apic_extra_bits);
+
+ return id;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+ unsigned long x;
+
+ /* mask out x2apic_extra_bits? */
+ x = id;
+ return x;
+}
+
+static unsigned int uv_read_apic_id(void)
+{
+ return get_apic_id(apic_read(APIC_ID));
+}
+
static unsigned int phys_pkg_id(int index_msb)
{
- return GET_APIC_ID(read_apic_id()) >> index_msb;
+ return uv_read_apic_id() >> index_msb;
}
#ifdef ZZZ /* Needs x2apic patch */
struct genapic apic_x2apic_uv_x = {
.name = "UV large system",
+ .acpi_madt_oem_check = uv_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = uv_target_cpus,
.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
.apic_id_registered = uv_apic_id_registered,
+ .init_apic_ldr = uv_init_apic_ldr,
.send_IPI_all = uv_send_IPI_all,
.send_IPI_allbutself = uv_send_IPI_allbutself,
.send_IPI_mask = uv_send_IPI_mask,
/* ZZZ.send_IPI_self = uv_send_IPI_self, */
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */
+ .get_apic_id = get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = (0xFFFFFFFFu),
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
enum map_type {map_wb, map_uc};
- static void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
+ static __init void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
{
unsigned long bytes, paddr;
sn_rtc_cycles_per_second = ticks_per_sec;
}
- static __init void uv_system_init(void)
+ static bool uv_system_inited;
+
+ void __init uv_system_init(void)
{
union uvh_si_addr_map_config_u m_n_config;
union uvh_node_id_u node_id;
map_mmr_high(max_pnode);
map_config_high(max_pnode);
map_mmioh_high(max_pnode);
+ uv_system_inited = true;
}
/*
*/
void __cpuinit uv_cpu_init(void)
{
- if (!uv_node_to_blade)
- uv_system_init();
+ BUG_ON(!uv_system_inited);
uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
set_x2apic_extra_bits(uv_hub_info->pnode);
}
return sum & 0xFF;
}
- static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
+ static void __init MP_processor_info(struct mpc_config_processor *m)
{
int apicid;
char *bootup_cpu = "";
generic_bigsmp_probe();
#endif
+#ifdef CONFIG_X86_32
setup_apic_routing();
+#endif
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
}
- static void construct_ioapic_table(int mpc_default_type)
+ static void __init construct_ioapic_table(int mpc_default_type)
{
struct mpc_config_ioapic ioapic;
struct mpc_config_bus bus;
construct_default_ioirq_mptable(mpc_default_type);
}
#else
- static inline void construct_ioapic_table(int mpc_default_type) { }
+ static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
- .apic_write = native_apic_write,
- .apic_read = native_apic_read,
.setup_boot_clock = setup_boot_APIC_clock,
.setup_secondary_clock = setup_secondary_APIC_clock,
.startup_ipi_hook = paravirt_nop,
.spin_unlock = __ticket_spin_unlock,
#endif
};
- EXPORT_SYMBOL_GPL(pv_lock_ops);
+ EXPORT_SYMBOL(pv_lock_ops);
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL (pv_cpu_ops);
* @size: Size of the crashkernel memory to reserve.
* Returns the base address on success, and -1ULL on failure.
*/
- unsigned long long find_and_reserve_crashkernel(unsigned long long size)
+ unsigned long long __init find_and_reserve_crashkernel(unsigned long long size)
{
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long start = 0LL;
early_cpu_init();
early_ioremap_init();
- #if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
- /*
- * Must be before kernel pagetables are setup
- * or fixmap area is touched.
- */
- vmi_init();
- #endif
-
ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
screen_info = boot_params.screen_info;
edid_info = boot_params.edid_info;
parse_early_param();
+ #if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+ /*
+ * Must be before kernel pagetables are setup
+ * or fixmap area is touched.
+ */
+ vmi_init();
+ #endif
+
/* after early param, so could get panic from serial */
reserve_early_setup_data();
num_physpages = max_pfn;
check_efer();
+ if (cpu_has_x2apic)
+ check_x2apic();
/* How many end-of-memory variables you have, grandma! */
/* need this before calling reserve_initrd */
static atomic_t init_deasserted;
-static int boot_cpu_logical_apicid;
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
#endif
#ifdef CONFIG_X86_32
+static int boot_cpu_logical_apicid;
+
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = BAD_APICID };
/*
* (This works even if the APIC is not enabled.)
*/
- phys_id = GET_APIC_ID(read_apic_id());
+ phys_id = read_apic_id();
cpuid = smp_processor_id();
if (cpu_isset(cpuid, cpu_callin_map)) {
panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
printk(KERN_CONT
"a previous APIC delivery may have failed\n");
- apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
- apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+ apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
timeout = 0;
do {
int maxlvt;
/* Target chip */
- apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-
/* Boot on the stack */
/* Kick the second */
- apic_write(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
+ apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
/*
* Turn INIT on target chip
*/
- apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/*
* Send IPI
*/
- apic_write(APIC_ICR,
- APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
+ apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
+ phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
pr_debug("Deasserting INIT.\n");
/* Target chip */
- apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/* Send IPI */
- apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+ apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
*/
/* Target chip */
- apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-
/* Boot on the stack */
/* Kick the second */
- apic_write(APIC_ICR, APIC_DM_STARTUP | (start_eip >> 12));
+ apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
+ phys_apicid);
/*
* Give the other CPU some time to accept the IPI.
}
#ifdef CONFIG_X86_64
+
+ /* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
+ static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
+ {
+ if (!after_bootmem)
+ free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
+ }
+
/*
* Allocate node local memory for the AP pda.
*
if (oldpda) {
memcpy(newpda, oldpda, size);
- if (!after_bootmem)
- free_bootmem((unsigned long)oldpda, size);
+ free_bootmem_pda(oldpda);
}
newpda->in_bootmem = 0;
flush_tlb_all();
low_mappings = 1;
- #ifdef CONFIG_X86_PC
- if (def_to_bigsmp && apicid > 8) {
- printk(KERN_WARNING
- "More than 8 CPUs detected - skipping them.\n"
- "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
- err = -1;
- } else
- err = do_boot_cpu(apicid, cpu);
- #else
err = do_boot_cpu(apicid, cpu);
- #endif
zap_low_mappings();
low_mappings = 0;
static int __init smp_sanity_check(unsigned max_cpus)
{
preempt_disable();
+
+ #if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
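+ /*
+ * The default (non-bigsmp) APIC addressing supports at most 8
+ * local APICs, so trim the present and possible maps down to the
+ * first 8 CPUs.
+ */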
+ if (def_to_bigsmp && nr_cpu_ids > 8) {
+ unsigned int cpu;
+ unsigned nr;
+
+ printk(KERN_WARNING
+ "More than 8 CPUs detected - skipping them.\n"
+ "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+
+ nr = 0;
+ for_each_present_cpu(cpu) {
+ if (nr >= 8)
+ cpu_clear(cpu, cpu_present_map);
+ nr++;
+ }
+
+ nr = 0;
+ for_each_possible_cpu(cpu) {
+ if (nr >= 8)
+ cpu_clear(cpu, cpu_possible_map);
+ nr++;
+ }
+
+ nr_cpu_ids = 8;
+ }
+ #endif
+
if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
printk(KERN_WARNING "weird, boot CPU (#%d) not listed"
"by the BIOS.\n", hard_smp_processor_id());
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
+#ifdef CONFIG_X86_32
boot_cpu_logical_apicid = logical_smp_processor_id();
+#endif
current_thread_info()->cpu = 0; /* needed? */
set_cpu_sibling_map(0);
+#ifdef CONFIG_X86_64
+ enable_IR_x2apic();
+ setup_apic_routing();
+#endif
+
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
disable_smp();
}
preempt_disable();
- if (GET_APIC_ID(read_apic_id()) != boot_cpu_physical_apicid) {
+ if (read_apic_id() != boot_cpu_physical_apicid) {
panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
- GET_APIC_ID(read_apic_id()), boot_cpu_physical_apicid);
+ read_apic_id(), boot_cpu_physical_apicid);
/* Or can we switch back to PIC here? */
}
preempt_enable();
printk(KERN_INFO "CPU%d: ", 0);
print_cpu_info(&cpu_data(0));
setup_boot_clock();
+
+ if (is_uv_system())
+ uv_system_init();
out:
preempt_enable();
}
BUG();
}
#endif
-
- /*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */
- static int __init parse_maxcpus(char *arg)
- {
- extern unsigned int maxcpus;
-
- if (arg)
- maxcpus = simple_strtoul(arg, NULL, 0);
- return 0;
- }
- early_param("maxcpus", parse_maxcpus);
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
+#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
-#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
+ #define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */
+#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
+ #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
+#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
struct genapic {
char *name;
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
u32 int_delivery_mode;
u32 int_dest_mode;
int (*apic_id_registered)(void);
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
unsigned int (*phys_pkg_id)(int index_msb);
+ unsigned int (*get_apic_id)(unsigned long x);
+ unsigned long (*set_apic_id)(unsigned int id);
+ unsigned long apic_id_mask;
};
extern struct genapic *genapic;
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
+extern struct genapic apic_x2apic_cluster;
+extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);
+extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
+ extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
extern void setup_apic_routing(void);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
+extern void detect_extended_topology(struct cpuinfo_x86 *c);
#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
extern unsigned long idle_halt;
extern unsigned long idle_nomwait;
+ /*
+ * On systems with caches, the caches must be flushed as the absolute
+ * last instruction before going into a suspended halt.  Otherwise,
+ * dirty data can linger in the cache and become stale on resume,
+ * leading to strange errors.
+ *
+ * Perform a variety of operations to guarantee that the compiler
+ * will not reorder instructions.  wbinvd itself is serializing, so
+ * the processor will not reorder.
+ *
+ * Systems without caches can just go into halt.
+ */
+ static inline void wbinvd_halt(void)
+ {
+ mb();
+ /* check for clflush to determine if wbinvd is legal */
+ if (cpu_has_clflush)
+ asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
+ else
+ while (1)
+ halt();
+ }
+
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);