config GENERIC_ENTRY
bool
-config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config OPROFILE_EVENT_MULTIPLEX
- bool "OProfile multiplexing support (EXPERIMENTAL)"
- default n
- depends on OPROFILE && X86
- help
- The number of hardware counters is limited. The multiplexing
- feature enables OProfile to gather more events than counters
- are provided by the hardware. This is realized by switching
- between events at a user specified time interval.
-
- If unsure, say N.
-
-config HAVE_OPROFILE
- bool
-
-config OPROFILE_NMI_TIMER
- def_bool y
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64
-
config KPROBES
bool "Kprobes"
depends on MODULES
accesses are required to be 64 bit aligned in this way even
though it is not a 64 bit architecture.
- See Documentation/unaligned-memory-access.txt for more
- information on the topic of unaligned memory accesses.
+ See Documentation/core-api/unaligned-memory-access.rst for
+ more information on the topic of unaligned memory accesses.
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
still support 32-bit off_t. This option is enabled for all such
architectures explicitly.
+# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat
+config ARCH_32BIT_USTAT_F_TINODE
+ bool
+
config HAVE_ASM_MODVERSIONS
bool
help
reading and writing arbitrary memory may be able to locate them
and hijack control flow by modifying the stacks.
+config LTO
+ bool
+ help
+ Selected if the kernel will be built using the compiler's LTO feature.
+
+config LTO_CLANG
+ bool
+ select LTO
+ help
+ Selected if the kernel will be built using Clang's LTO feature.
+
+config ARCH_SUPPORTS_LTO_CLANG
+ bool
+ help
+ An architecture should select this option if it supports:
+ - compiling with Clang,
+ - compiling inline assembly with Clang's integrated assembler,
+ - and linking with LLD.
+
+config ARCH_SUPPORTS_LTO_CLANG_THIN
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ ThinLTO mode.
+
+config HAS_LTO_CLANG
+ def_bool y
+ # Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
+ depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
+ depends on $(success,test $(LLVM) -eq 1)
+ depends on $(success,test $(LLVM_IAS) -eq 1)
+ depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
+ depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
+ depends on ARCH_SUPPORTS_LTO_CLANG
+ depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
+ depends on !KASAN
+ depends on !GCOV_KERNEL
+ help
+ The compiler and Kconfig options support building with Clang's
+ LTO.
+
+choice
+ prompt "Link Time Optimization (LTO)"
+ default LTO_NONE
+ help
+ This option enables Link Time Optimization (LTO), which allows the
+ compiler to optimize binaries globally.
+
+ If unsure, select LTO_NONE. Note that LTO is very resource-intensive
+ so it's disabled by default.
+
+config LTO_NONE
+ bool "None"
+ help
+ Build the kernel normally, without Link Time Optimization (LTO).
+
+config LTO_CLANG_FULL
+ bool "Clang Full LTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG
+ depends on !COMPILE_TEST
+ select LTO_CLANG
+ help
+ This option enables Clang's full Link Time Optimization (LTO), which
+ allows the compiler to optimize the kernel globally. If you enable
+ this option, the compiler generates LLVM bitcode instead of ELF
+ object files, and the actual compilation from bitcode happens at
+ the LTO link step, which may take several minutes depending on the
+ kernel configuration. More information can be found from LLVM's
+ documentation:
+
+ https://llvm.org/docs/LinkTimeOptimization.html
+
+ During link time, this option can use a large amount of RAM, and
+ may take much longer than the ThinLTO option.
+
+config LTO_CLANG_THIN
+ bool "Clang ThinLTO (EXPERIMENTAL)"
+ depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
+ select LTO_CLANG
+ help
+ This option enables Clang's ThinLTO, which allows for parallel
+ optimization and faster incremental compiles compared to the
+ CONFIG_LTO_CLANG_FULL option. More information can be found
+ from Clang's documentation:
+
+ https://clang.llvm.org/docs/ThinLTO.html
+
+ If unsure, say Y.
+endchoice
+
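As a rough userspace illustration of what this choice means by optimizing "globally": with -flto the compiler emits LLVM bitcode instead of native object code, so inlining and constant folding can happen across translation units at link time. The two tiny files below only sketch that effect; they are not kernel code, and the file/function names are illustrative.

    /* util.c */
    int clamp_to_byte(int v)
    {
            return v < 0 ? 0 : (v > 255 ? 255 : v);
    }

    /* main.c -- built roughly as:
     *   clang -O2 -flto -c util.c main.c
     *   clang -O2 -flto -fuse-ld=lld util.o main.o -o demo
     * Without LTO the call below is an ordinary cross-file call; with LTO
     * the link step can inline clamp_to_byte() and fold the result to 255.
     */
    #include <stdio.h>

    int clamp_to_byte(int v);

    int main(void)
    {
            printf("%d\n", clamp_to_byte(300));
            return 0;
    }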
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
This spares a stack switch and improves cache usage on softirq
processing.
+ config HAVE_SOFTIRQ_ON_OWN_STACK
+ bool
+ help
+ Architecture provides a function to run __do_softirq() on a
+ separate stack.
+
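An architecture selecting HAVE_SOFTIRQ_ON_OWN_STACK provides do_softirq_own_stack(), which the generic softirq code calls instead of running __do_softirq() on the current stack. A minimal sketch of the arch side is below; arch_call_on_irq_stack() is a hypothetical placeholder for whatever stack-switching helper the architecture actually has.

    #include <linux/interrupt.h>
    #include <asm/softirq_stack.h>

    /* Hypothetical arch helper: run fn() on the per-CPU IRQ/softirq stack. */
    void arch_call_on_irq_stack(void (*fn)(void));

    void do_softirq_own_stack(void)
    {
            /* Process pending softirqs on the separate stack. */
            arch_call_on_irq_stack(__do_softirq);
    }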
config PGTABLE_LEVELS
int
default 2
bool
depends on HAVE_STATIC_CALL
+config HAVE_PREEMPT_DYNAMIC
+ bool
+ depends on HAVE_STATIC_CALL
+ depends on GENERIC_ENTRY
+ help
+ Select this if the architecture supports boot time preempt setting
+ on top of static calls. It is strongly advised to support inline
+ static calls to avoid any overhead.
+
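HAVE_PREEMPT_DYNAMIC builds on the static call API: a direct call site that can be retargeted once at boot and costs essentially nothing afterwards. The sketch below only illustrates that API with made-up names; it is not the actual preempt= plumbing.

    #include <linux/init.h>
    #include <linux/static_call.h>

    static void preempt_mode_full(void) { /* full preemption behaviour */ }
    static void preempt_mode_none(void) { /* no-op for preempt=none */ }

    /* Call sites start out targeting the "full" implementation. */
    DEFINE_STATIC_CALL(preempt_mode, preempt_mode_full);

    static void __init pick_preempt_mode(bool full)
    {
            /* Boot-time selection: patch every call site in place. */
            if (!full)
                    static_call_update(preempt_mode, preempt_mode_none);
    }

    static void hot_path(void)
    {
            /* With inline static calls this is a plain direct call. */
            static_call(preempt_mode)();
    }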
config ARCH_WANT_LD_ORPHAN_WARN
bool
help
If a 32-bit architecture requires 64-bit arguments to be split into
pairs of 32-bit arguments, select this option.
+config ARCH_HAS_ELFCORE_COMPAT
+ bool
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select SET_FS
help
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
+config TLB_PTLOCK
+ bool "Use page table locks in TLB fault handler"
+ depends on SMP
+ default n
+ help
+ Select this option to enable page table locking in the TLB
+ fault handler. This ensures that page table entries are
+ updated consistently on SMP machines at the expense of some
+ loss in performance.
+
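The "page table locking" referred to here is the usual per-table spinlock: the TLB fault path only updates a PTE while holding that lock, so concurrent CPUs cannot interleave their updates. The parisc handler does this in assembly; in generic kernel C the pattern is roughly the hedged sketch below (the function name is illustrative, not an existing helper).

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    static void pte_mark_young_locked(struct mm_struct *mm, unsigned long addr,
                                      pmd_t *pmd, pte_t *ptep)
    {
            spinlock_t *ptl = pte_lockptr(mm, pmd);

            spin_lock(ptl);
            /* Update the entry only while the page-table lock is held. */
            set_pte_at(mm, addr, ptep, pte_mkyoung(*ptep));
            spin_unlock(ptl);
    }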
config HOTPLUG_CPU
bool
default y if SMP
config COMPAT
def_bool y
depends on 64BIT
- select COMPAT_BINFMT_ELF if BINFMT_ELF
config SYSVIPC_COMPAT
def_bool y
#include <linux/types.h>
#include <asm/io.h>
+ #include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>
/*
* IRQ STACK - used for irq handler
*/
+#ifdef CONFIG_64BIT
+#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
+#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
+#endif
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64
- select HAVE_TIF_NOHZ if PPC64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
- select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
select MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_IRQ_TIME_ACCOUNTING
bool "Enable support for 32bit binaries"
depends on PPC64
default y if !CPU_LITTLE_ENDIAN
- select COMPAT_BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
Say N if you are unsure.
config PPC_QUEUED_SPINLOCKS
- bool "Queued spinlocks"
+ bool "Queued spinlocks" if EXPERT
depends on SMP
+ default PPC_BOOK3S_64
help
Say Y here to use queued spinlocks which give better scalability and
fairness on large SMP and NUMA systems without harming single threaded
performance.
- This option is currently experimental, the code is more complex and
- less tested so it defaults to "N" for the moment.
-
- If unsure, say "N".
-
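For background on the scalability claim: in a queued (MCS-style) lock each waiter spins on its own queue node rather than on the shared lock word, so hand-off touches only the successor's cache line and waiters are served in FIFO order. The userspace C11 sketch below illustrates the idea only; the kernel's implementation lives in kernel/locking/qspinlock.c and is considerably more compact and careful.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
            _Atomic(struct mcs_node *) next;
            atomic_bool locked;
    };

    struct mcs_lock {
            _Atomic(struct mcs_node *) tail;        /* NULL when uncontended */
    };

    static void mcs_acquire(struct mcs_lock *lock, struct mcs_node *node)
    {
            struct mcs_node *prev;

            atomic_store(&node->next, NULL);
            atomic_store(&node->locked, true);

            /* Atomically append ourselves to the waiter queue. */
            prev = atomic_exchange(&lock->tail, node);
            if (!prev)
                    return;                 /* queue was empty: lock taken */

            /* Link in behind the predecessor, then spin on our own flag only. */
            atomic_store(&prev->next, node);
            while (atomic_load(&node->locked))
                    ;
    }

    static void mcs_release(struct mcs_lock *lock, struct mcs_node *node)
    {
            struct mcs_node *next = atomic_load(&node->next);

            if (!next) {
                    struct mcs_node *expected = node;

                    /* No visible successor: try to mark the queue empty. */
                    if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
                            return;
                    /* A successor is mid-enqueue; wait for it to link itself. */
                    while (!(next = atomic_load(&node->next)))
                            ;
            }
            /* Hand the lock directly to the next waiter, in FIFO order. */
            atomic_store(&next->locked, false);
    }

Each thread passes its own mcs_node, so the only cross-CPU traffic on unlock is the single store that releases the successor.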
config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
def_bool y
depends on MEMORY_HOTPLUG
-config STDBINUTILS
- bool "Using standard binutils settings"
- depends on 44x
- default y
- help
- Turning this option off allows you to select 256KB PAGE_SIZE on 44x.
- Note, that kernel will be able to run only those applications,
- which had been compiled using binutils later than 2.17.50.0.3 with
- '-zmax-page-size' set to 256K (the default is 64K). Or, if using
- the older binutils, you can patch them with a trivial patch, which
- changes the ELF_MAXPAGESIZE definition from 0x10000 to 0x40000.
-
choice
prompt "Page size"
default PPC_4K_PAGES
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
config PPC_256K_PAGES
- bool "256k page size"
- depends on 44x && !STDBINUTILS
+ bool "256k page size (Requires non-standard binutils settings)"
+ depends on 44x && !PPC_47x
help
Make the page size 256k.
- As the ELF standard only requires alignment to support page
- sizes up to 64k, you will need to compile all of your user
- space applications with a non-standard binutils settings
- (see the STDBINUTILS description for details).
-
- Say N unless you know what you are doing.
+ The kernel will only be able to run applications that have been
+ compiled with '-zmax-page-size' set to 256K (the default is 64K) using
+ binutils later than 2.17.50.0.3, or by patching the ELF_MAXPAGESIZE
+ definition from 0x10000 to 0x40000 in older versions.
endchoice
#include <linux/pgtable.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
+ #include <asm/softirq_stack.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
}
}
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+ unsigned long kuap_state = get_kuap();
+
+ /*
+ * Check if anything calls local_irq_enable/restore() when KUAP is
+ * disabled (user access enabled). We handle that case here by saving
+ * and re-locking AMR but we shouldn't get here in the first place,
+ * hence the warning.
+ */
+ kuap_check_amr();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+ replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
+
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
- replay_soft_interrupts();
+ replay_soft_interrupts_irqrestore();
local_paca->irq_happened = 0;
trace_hardirqs_on();
{
unsigned int irq;
- irq_enter();
-
trace_irq_entry(regs);
/*
generic_handle_irq(irq);
trace_irq_exit(regs);
-
- irq_exit();
}
-void do_IRQ(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
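DEFINE_INTERRUPT_HANDLER_ASYNC() comes from the new <asm/interrupt.h> and wraps the handler body with the entry/exit bookkeeping that do_IRQ() previously open-coded via irq_enter()/irq_exit(). Very roughly, and only as a sketch of its shape rather than the exact macro, the wrapper generated for a handler fn looks like:

    void fn(struct pt_regs *regs)
    {
            struct interrupt_state state;

            interrupt_async_enter_prepare(regs, &state);
            ____fn(regs);           /* the body written after the macro */
            interrupt_async_exit_prepare(regs, &state);
    }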
# Note: keep this list sorted alphabetically
#
imply IMA_SECURE_AND_OR_TRUSTED_BOOT
+ select ARCH_32BIT_USTAT_F_TINODE
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_ENTRY
select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_TIME_NS
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_NOP_MCOUNT
- select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
config COMPAT
def_bool y
prompt "Kernel support for 31 bit emulation"
- select COMPAT_BINFMT_ELF if BINFMT_ELF
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
select HAVE_UID16
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <linux/entry-common.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
+ #include <asm/softirq_stack.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
-void do_IRQ(struct pt_regs *regs, int irq)
+static void do_IRQ(struct pt_regs *regs, int irq)
{
- struct pt_regs *old_regs;
-
- old_regs = set_irq_regs(regs);
- irq_enter();
if (tod_after_eq(S390_lowcore.int_clock,
S390_lowcore.clock_comparator))
/* Serve timer interrupts first. */
clock_comparator_work();
generic_handle_irq(irq);
+}
+
+static int on_async_stack(void)
+{
+ unsigned long frame = current_frame_address();
+
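+ /* Non-zero when the current frame lies within THREAD_SIZE below
+  * S390_lowcore.async_stack, i.e. we are already on the async stack. */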
+ return !!!((S390_lowcore.async_stack - frame) >> (PAGE_SHIFT + THREAD_SIZE_ORDER));
+}
+
+static void do_irq_async(struct pt_regs *regs, int irq)
+{
+ if (on_async_stack())
+ do_IRQ(regs, irq);
+ else
+ CALL_ON_STACK(do_IRQ, S390_lowcore.async_stack, 2, regs, irq);
+}
+
+static int irq_pending(struct pt_regs *regs)
+{
+ int cc;
+
+ asm volatile("tpi 0\n"
+ "ipm %0" : "=d" (cc) : : "cc");
+ return cc >> 28;
+}
+
+void noinstr do_io_irq(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ int from_idle;
+
+ irq_enter();
+
+ if (user_mode(regs))
+ update_timer_sys();
+
+ from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
+ if (from_idle)
+ account_idle_time_irq();
+
+ do {
+ memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
+ if (S390_lowcore.io_int_word & BIT(31))
+ do_irq_async(regs, THIN_INTERRUPT);
+ else
+ do_irq_async(regs, IO_INTERRUPT);
+ } while (MACHINE_IS_LPAR && irq_pending(regs));
+
+ irq_exit();
+ set_irq_regs(old_regs);
+ irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+}
+
+void noinstr do_ext_irq(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ int from_idle;
+
+ irq_enter();
+
+ if (user_mode(regs))
+ update_timer_sys();
+
+ memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
+ regs->int_parm = S390_lowcore.ext_params;
+ regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+
+ from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
+ if (from_idle)
+ account_idle_time_irq();
+
+ do_irq_async(regs, EXT_INTERRUPT);
+
irq_exit();
set_irq_regs(old_regs);
+ irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
static void show_msi_interrupt(struct seq_file *p, int irq)
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_UID16
+ select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_FORCED_THREADING
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_SECCOMP if SPARC64
select ARCH_HAS_PTE_SPECIAL
select PCI_DOMAINS if PCI
select ARCH_HAS_GIGANTIC_PAGE
+ select HAVE_SOFTIRQ_ON_OWN_STACK
config ARCH_PROC_KCORE_TEXT
def_bool y
Management" code will be disabled if you say Y here.
See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
- available at <http://www.tldp.org/docs.html#howto>.
+ available at <https://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
bool
depends on SPARC64
default y
- select COMPAT_BINFMT_ELF
select HAVE_UID16
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += module.lds.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
+ generic-y += softirq_stack.h
generic-y += switch_to.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
generic-y += kprobes.h
+generic-y += mm_hooks.h
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select SWIOTLB
+ select ARCH_HAS_ELFCORE_COMPAT
config FORCE_DYNAMIC_FTRACE
def_bool y
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
+ select ARCH_SUPPORTS_LTO_CLANG if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
+ select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_OPTPROBES
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API
+ select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
+ select HAVE_PREEMPT_DYNAMIC
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
If you don't know what to do here, say N.
config X86_MPPARSE
- bool "Enable MPS table" if ACPI || SFI
+ bool "Enable MPS table" if ACPI
default y
depends on X86_LOCAL_APIC
help
depends on PCI
depends on X86_64 || (PCI_GOANY && X86_32)
depends on X86_IO_APIC
- select SFI
select I2C
select DW_APB_TIMER
select APB_TIMER
config HPET_EMULATE_RTC
def_bool y
- depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
-
-config APB_TIMER
- def_bool y if X86_INTEL_MID
- prompt "Intel MID APB Timer Support" if X86_INTEL_MID
- select DW_APB_TIMER
- depends on X86_INTEL_MID && SFI
- help
- APB timer is the replacement for 8254, HPET on X86 MID platforms.
- The APBT provides a stable time base on SMP
- systems, unlike the TSC, but it is more expensive to access,
- as it is off-chip. APB timers are always running regardless of CPU
- C states, they are used as per CPU clockevent device when possible.
+ depends on HPET_TIMER && (RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
# Mark as expert because too many people got it wrong.
# The code disables itself when not needed.
If you don't know what a machine check is and you don't do kernel
QA it is safe to say n.
-config X86_THERMAL_VECTOR
- def_bool y
- depends on X86_MCE_INTEL
-
source "arch/x86/events/Kconfig"
config X86_LEGACY_VM86
source "drivers/acpi/Kconfig"
-source "drivers/sfi/Kconfig"
-
config X86_APM_BOOT
def_bool y
depends on APM
config PCI_MMCONFIG
bool "Support mmconfig PCI config space access" if X86_64
default y
- depends on PCI && (ACPI || SFI || JAILHOUSE_GUEST)
+ depends on PCI && (ACPI || JAILHOUSE_GUEST)
depends on X86_64 || (PCI_GOANY || PCI_GOMMCONFIG)
config PCI_OLPC
depends on X86_64
select ARCH_WANT_OLD_COMPAT_IPC
select BINFMT_ELF
- select COMPAT_BINFMT_ELF
select COMPAT_OLD_SIGACTION
help
Include code to run legacy 32-bit programs under a
extern int irq_init_percpu_irqstack(unsigned int cpu);
- #define __ARCH_HAS_DO_SOFTIRQ
-
struct irq_desc;
extern void fixup_irqs(void);
extern void __handle_irq(struct irq_desc *desc, struct pt_regs *regs);
-extern __visible void do_IRQ(struct pt_regs *regs, unsigned long vector);
-
extern void init_ISA_irqs(void);
extern void __init init_IRQ(void);
#ifdef CONFIG_X86_X2APIC
int x2apic_mode;
+EXPORT_SYMBOL_GPL(x2apic_mode);
enum {
X2APIC_OFF,
* Local APIC interrupts
*/
- /**
- * spurious_interrupt - Catch all for interrupts raised on unused vectors
- * @regs: Pointer to pt_regs on stack
- * @vector: The vector number
- *
- * This is invoked from ASM entry code to catch all interrupts which
- * trigger on an entry which is routed to the common_spurious idtentry
- * point.
- *
- * Also called from sysvec_spurious_apic_interrupt().
+ /*
+ * Common handling code for spurious_interrupt and spurious_vector entry
+ * points below. No point in allowing the compiler to inline it twice.
*/
- DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+ static noinline void handle_spurious_interrupt(u8 vector)
{
u32 v;
trace_spurious_apic_exit(vector);
}
+ /**
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs: Pointer to pt_regs on stack
+ * @vector: The vector number
+ *
+ * This is invoked from ASM entry code to catch all interrupts which
+ * trigger on an entry which is routed to the common_spurious idtentry
+ * point.
+ */
+ DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+ {
+ handle_spurious_interrupt(vector);
+ }
+
DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
{
- __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
+ handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
}
/*
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+ if (c->extended_cpuid_level >= 0x8000001f)
+ c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
+
init_scattered_cpuid_features(c);
init_speculation_control(c);
&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
- DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
- DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+ DEFINE_PER_CPU(void *, hardirq_stack_ptr);
+ DEFINE_PER_CPU(bool, hardirq_stack_inuse);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
+#include <asm/thermal.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_X86_64))
- run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+ generic_handle_irq_desc(desc);
else
__handle_irq(desc, regs);
}
}
}
#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+static void smp_thermal_vector(void)
+{
+ if (x86_thermal_enabled())
+ intel_thermal_interrupt();
+ else
+ pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
+ smp_processor_id());
+}
+
+DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
+{
+ trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
+ inc_irq_stat(irq_thermal_count);
+ smp_thermal_vector();
+ trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
+ ack_APIC_irq();
+}
+#endif
mandatory-y += linkage.h
mandatory-y += local.h
mandatory-y += local64.h
-mandatory-y += mm-arch-hooks.h
mandatory-y += mmiowb.h
mandatory-y += mmu.h
mandatory-y += mmu_context.h
mandatory-y += serial.h
mandatory-y += shmparam.h
mandatory-y += simd.h
+ mandatory-y += softirq_stack.h
mandatory-y += switch_to.h
mandatory-y += timex.h
mandatory-y += tlbflush.h
* not correctly determine insn->call_dest->sec (external symbols do
* not have a section).
*/
- if (vmlinux && sec)
+ if (vmlinux && noinstr && sec)
state->noinstr = sec->noinstr;
}
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
- WARN("static_call: can't find static_call_key symbol: %s", tmp);
- return -1;
+ if (!module) {
+ WARN("static_call: can't find static_call_key symbol: %s", tmp);
+ return -1;
+ }
+
+ /*
+ * For modules, the key might not be exported, which
+ * means the module can make static calls but isn't
+ * allowed to change them.
+ *
+ * In that case we temporarily set the key to be the
+ * trampoline address. This is fixed up in
+ * static_call_add_module().
+ */
+ key_sym = insn->call_dest;
}
free(key_name);
return 0;
}
+static int create_mcount_loc_sections(struct objtool_file *file)
+{
+ struct section *sec, *reloc_sec;
+ struct reloc *reloc;
+ unsigned long *loc;
+ struct instruction *insn;
+ int idx;
+
+ sec = find_section_by_name(file->elf, "__mcount_loc");
+ if (sec) {
+ INIT_LIST_HEAD(&file->mcount_loc_list);
+ WARN("file already has __mcount_loc section, skipping");
+ return 0;
+ }
+
+ if (list_empty(&file->mcount_loc_list))
+ return 0;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
+ idx++;
+
+ sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
+ if (!sec)
+ return -1;
+
+ reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
+ if (!reloc_sec)
+ return -1;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
+
+ loc = (unsigned long *)sec->data->d_buf + idx;
+ memset(loc, 0, sizeof(unsigned long));
+
+ reloc = malloc(sizeof(*reloc));
+ if (!reloc) {
+ perror("malloc");
+ return -1;
+ }
+ memset(reloc, 0, sizeof(*reloc));
+
+ if (insn->sec->sym) {
+ reloc->sym = insn->sec->sym;
+ reloc->addend = insn->offset;
+ } else {
+ reloc->sym = find_symbol_containing(insn->sec, insn->offset);
+
+ if (!reloc->sym) {
+ WARN("missing symbol for insn at offset 0x%lx\n",
+ insn->offset);
+ return -1;
+ }
+
+ reloc->addend = insn->offset - reloc->sym->offset;
+ }
+
+ reloc->type = R_X86_64_64;
+ reloc->offset = idx * sizeof(unsigned long);
+ reloc->sec = reloc_sec;
+ elf_add_reloc(file->elf, reloc);
+
+ idx++;
+ }
+
+ if (elf_rebuild_reloc_section(file->elf, reloc_sec))
+ return -1;
+
+ return 0;
+}
+
/*
* Warnings shouldn't be reported for ignored functions.
*/
static const char *uaccess_safe_builtin[] = {
/* KASAN */
"kasan_report",
- "check_memory_region",
+ "kasan_check_range",
/* KASAN out-of-line */
"__asan_loadN_noabort",
"__asan_load1_noabort",
insn->type = INSN_NOP;
}
+ if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+
+ elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len));
+
+ insn->type = INSN_NOP;
+
+ list_add_tail(&insn->mcount_loc_node,
+ &file->mcount_loc_list);
+ }
+
/*
* Whatever stack impact regular CALLs have, should be undone
* by the RETURN of the called function.
}
}
+ else if (op->dest.reg == CFI_SP &&
+ cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
+ cfi->vals[op->src.reg].offset == cfa->offset) {
+
+ /*
+ * The same stack swizzle case 2) as above. But
+ * because we can't change cfa->base, case 3)
+ * will become a regular POP. Pretend we're a
+ * PUSH so things don't go unbalanced.
+ */
+ cfi->stack_size += 8;
+ }
+
+
break;
case OP_SRC_ADD:
goto out;
warnings += ret;
+ if (mcount) {
+ ret = create_mcount_loc_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
out:
/*
* For now, don't fail the kernel build on fatal warnings. These