select ACPI_MCFG if ACPI
select ACPI_SPCR_TABLE if ACPI
select ARCH_CLOCKSOURCE_DATA
+ select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
- select HAVE_KRETPROBES if HAVE_KPROBES
+ select HAVE_KRETPROBES
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
config MMU
def_bool y
-config DEBUG_RODATA
- def_bool y
-
config ARM64_PAGE_SHIFT
int
default 16 if ARM64_64K_PAGES
If unsure, say Y.
+ config QCOM_FALKOR_ERRATUM_1003
+ bool "Falkor E1003: Incorrect translation due to ASID change"
+ default y
+ select ARM64_PAN if ARM64_SW_TTBR0_PAN
+ help
+ On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
+ and BADDR are changed together in TTBRx_EL1. The workaround for this
+ issue is to use a reserved ASID in cpu_do_switch_mm() before
+ switching to the new ASID. Saying Y here selects ARM64_PAN if
+ ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
+ maintaining the E1003 workaround in the software PAN emulation code
+ would be an unnecessary complication. The affected Falkor v1 CPU
+ implements ARMv8.1 hardware PAN support, and hardware PAN support
+ and software PAN emulation are mutually exclusive at runtime.
+
+ If unsure, say Y.
+
+ config QCOM_FALKOR_ERRATUM_1009
+ bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
+ default y
+ help
+ On Falkor v1, the CPU may prematurely complete a DSB following a
+ TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
+ one more time to fix the issue.
+
+ If unsure, say Y.
+
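For illustration, a minimal sketch of what "repeat the TLBI" means in practice; flush_tlb_all_e1009 is a hypothetical helper, not the kernel's actual __tlbi plumbing, and the real workaround is applied via the alternatives framework only on affected CPUs:

	/* Hypothetical E1009 sketch: issue the broadcast invalidate twice,
	 * with a DSB in between, so the final DSB cannot complete before
	 * the invalidation has actually taken effect. */
	static inline void flush_tlb_all_e1009(void)
	{
		asm volatile(
		"	dsb	ishst\n"
		"	tlbi	vmalle1is\n"
		"	dsb	ish\n"
		"	tlbi	vmalle1is\n"	/* repeated per Falkor E1009 */
		"	dsb	ish\n"
		"	isb"
		: : : "memory");
	}
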
endmenu
def_bool y
depends on NUMA
+ config HOLES_IN_ZONE
+ def_bool y
+ depends on NUMA
+
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
config COMPAT
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
- select COMPAT_BINFMT_ELF
+ select COMPAT_BINFMT_ELF if BINFMT_ELF
select HAVE_UID16
select OLD_SIGSUSPEND3
select COMPAT_OLD_SIGACTION
If in doubt, say "Y".
-config DEBUG_SET_MODULE_RONX
- bool "Set loadable kernel module data as NX and text as RO"
- depends on MODULES
- default y
- help
- Is this is set, kernel module text and rodata will be made read-only.
- This is to help catch accidental or malicious attempts to change the
- kernel's executable code.
-
- If in doubt, say Y.
-
config DEBUG_ALIGN_RODATA
- depends on DEBUG_RODATA
+ depends on STRICT_KERNEL_RWX
bool "Align linker sections up to SECTION_SIZE"
help
If this option is enabled, sections that may potentially be marked as
If in doubt, say N.
+ config DEBUG_EFI
+ depends on EFI && DEBUG_INFO
+ bool "UEFI debugging"
+ help
+ Enable this option to include EFI-specific debugging features into
+ the kernel that are only useful when using a debug build of the
+ UEFI firmware.
+
source "drivers/hwtracing/coresight/Kconfig"
endmenu
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+ #include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
- * @tmp: optional scratch register to be used if <dst> == sp, which
- * is not allowed in an adrp instruction
*/
- .macro adr_l, dst, sym, tmp=
- .ifb \tmp
+ .macro adr_l, dst, sym
+#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
- .else
- adrp \tmp, \sym
- add \dst, \tmp, :lo12:\sym
- .endif
+#else
+ movz \dst, #:abs_g3:\sym
+ movk \dst, #:abs_g2_nc:\sym
+ movk \dst, #:abs_g1_nc:\sym
+ movk \dst, #:abs_g0_nc:\sym
+#endif
.endm
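	/*
	 * Hypothetical example: "adr_l x0, vectors" assembles to an
	 * adrp/add pair in the core kernel, and to the four-instruction
	 * movz/movk absolute sequence above when built as part of a module.
	 */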
/*
* the address
*/
.macro ldr_l, dst, sym, tmp=
+#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
+#else
+ .ifb \tmp
+ adr_l \dst, \sym
+ ldr \dst, [\dst]
+ .else
+ adr_l \tmp, \sym
+ ldr \dst, [\tmp]
+ .endif
+#endif
.endm
/*
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
+#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
+#else
+ adr_l \tmp, \sym
+ str \src, [\tmp]
+#endif
.endm
/*
mrs \rd, sp_el0
.endm
+ /*
+ * Errata workaround prior to TTBR0_EL1 update
+ *
+ * val: TTBR value with new BADDR, preserved
+ * tmp0: temporary register, clobbered
+ * tmp1: other temporary register, clobbered
+ */
+ .macro pre_ttbr0_update_workaround, val, tmp0, tmp1
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+ alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ mrs \tmp0, ttbr0_el1
+ mov \tmp1, #FALKOR_RESERVED_ASID
+ bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR
+ msr ttbr0_el1, \tmp0
+ isb
+ bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR
+ msr ttbr0_el1, \tmp0
+ isb
+ alternative_else_nop_endif
+ #endif
+ .endm
+
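+ /*
+ * Usage sketch (the help text above names cpu_do_switch_mm() as the
+ * caller): run the workaround before installing the new ASID, e.g.
+ *
+ *	pre_ttbr0_update_workaround x0, x2, x3
+ *	bfi	x0, x1, #48, #16	// insert new ASID
+ *	msr	ttbr0_el1, x0		// new ASID + new BADDR
+ *	isb
+ */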
/*
* Errata workaround post TTBR0_EL1 update.
*/
#define KASAN_SHADOW_SIZE (0)
#endif
- /*
- * Physical vs virtual RAM address space conversion. These are
- * private definitions which should NOT be used outside memory.h
- * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
- #define __virt_to_phys(x) ({ \
- phys_addr_t __x = (phys_addr_t)(x); \
- __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
- (__x - kimage_voffset); })
-
- #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
- #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
-
- /*
- * Convert a page to/from a physical address
- */
- #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
- #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
-
/*
* Memory types available.
*/
*/
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+ /*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+
+ /*
+ * The linear kernel range starts in the middle of the virtual address
+ * space. Testing the top bit for the start of the region is a
+ * sufficient check.
+ */
+ #define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1)))
+
+ #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
+
+ #define __virt_to_phys_nodebug(x) ({ \
+ phys_addr_t __x = (phys_addr_t)(x); \
+ __is_lm_address(__x) ? __lm_to_phys(__x) : \
+ __kimg_to_phys(__x); \
+ })
+
+ #define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
+
+ #ifdef CONFIG_DEBUG_VIRTUAL
+ extern phys_addr_t __virt_to_phys(unsigned long x);
+ extern phys_addr_t __phys_addr_symbol(unsigned long x);
+ #else
+ #define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+ #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+ #endif
+
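For reference, a minimal sketch of the out-of-line CONFIG_DEBUG_VIRTUAL variant declared above (assumed to live in a separate C file; paraphrased from the nodebug definitions, not verbatim kernel source). It warns when virt_to_phys() is fed a non-linear-map address, then performs the same conversion:

	phys_addr_t __virt_to_phys(unsigned long x)
	{
		/* Catch callers passing a kernel-image (or bogus) address. */
		WARN(!__is_lm_address(x),
		     "virt_to_phys used for non-linear address: %pK (%pS)\n",
		     (void *)x, (void *)x);

		return __virt_to_phys_nodebug(x);
	}
	EXPORT_SYMBOL(__virt_to_phys);
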
+ #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+ #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
+
+ /*
+ * Convert a page to/from a physical address
+ */
+ #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+ #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
/*
* Note: Drivers should NOT use these. They are the wrong
* translation for translating DMA addresses. Use the driver
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
+ #define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+ #define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
- #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
+ #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+ #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
bool module = !core_kernel_text(uintaddr);
struct page *page;
- if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else if (!module)
- page = pfn_to_page(PHYS_PFN(__pa(addr)));
+ page = phys_to_page(__pa_symbol(addr));
else
return addr;
return insn;
}
+ u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
+ u32 insn)
+ {
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return 0;
+ }
+
+ return (insn >> shift) & GENMASK(4, 0);
+ }
+
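A hypothetical usage sketch for the new decoder: pulling the operand registers out of "add x0, x1, x2" (encoding 0x8b020020), whose Rd/Rn/Rm fields sit at the shifts handled above:

	static void __maybe_unused insn_decode_example(void)
	{
		u32 insn = 0x8b020020;	/* add x0, x1, x2 */
		u32 rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
		u32 rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, insn);
		u32 rm = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RM, insn);

		pr_debug("rd=%u rn=%u rm=%u\n", rd, rn, rm);	/* 0, 1, 2 */
	}
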
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
u32 insn,
enum aarch64_insn_register reg)
* for more details.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
per_cpu(cpu_scale, cpu) = capacity;
}
- #ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
- #endif
static u32 capacity_scale;
static u32 *raw_capacity;
static int __init register_cpufreq_notifier(void)
{
- if (cap_parsing_failed)
+ /*
+ * on ACPI-based systems we need to use the default cpu capacity
+ * until we have the necessary code to parse the cpu capacity, so
+ * skip registering cpufreq notifier.
+ */
+ if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
- address = (rt == 31) ? 0 : regs->regs[rt];
+ address = pt_regs_read_reg(regs, rt);
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+
+ pt_regs_write_reg(regs, rt, val);
- regs->regs[rt] = arm64_ftr_reg_ctrel0.sys_val;
regs->pc += 4;
}
return;
}
- force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ /*
+ * New SYS instructions may previously have been undefined at EL0. Fall
+ * back to our usual undefined instruction handler so that we handle
+ * these consistently.
+ */
+ do_undefinstr(regs);
}
long compat_arm_syscall(struct pt_regs *regs);
}
/*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
- void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
- arm64_notify_die("Oops - bad mode", regs, &info, 0);
+ current->thread.fault_address = 0;
+ current->thread.fault_code = 0;
+
+ force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)
dma_addr_t dev_addr;
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- if (!is_device_dma_coherent(dev))
+ if (!is_device_dma_coherent(dev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (!is_device_dma_coherent(dev))
+ if (!is_device_dma_coherent(dev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
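For context, a hypothetical driver-side use of DMA_ATTR_SKIP_CPU_SYNC, which is what the new checks honour: the map/unmap paths skip cache maintenance, and the driver syncs only the bytes the device actually wrote:

	/* Hypothetical driver snippet; dev, page and len are assumed. */
	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

	/* ... device DMAs len bytes into the buffer ... */

	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
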
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- if (!is_device_dma_coherent(dev))
+ if (!is_device_dma_coherent(dev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
for_each_sg(sgl, sg, ret, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
struct scatterlist *sg;
int i;
- if (!is_device_dma_coherent(dev))
+ if (!is_device_dma_coherent(dev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
for_each_sg(sgl, sg, nelems, i)
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
return 1;
}
+ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
+ {
+ if (swiotlb)
+ return swiotlb_dma_mapping_error(hwdev, addr);
+ return 0;
+ }
+
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = __swiotlb_sync_sg_for_device,
.dma_supported = __swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
+ .mapping_error = __swiotlb_dma_mapping_error,
};
static int __init atomic_pool_init(void)
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
- int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t iosize = size;
void *addr;
unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
- int prot = dma_direction_to_prot(dir, coherent);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
- dma_direction_to_prot(dir, coherent));
+ dma_info_to_prot(dir, coherent, attrs));
}
static void __iommu_unmap_sg_attrs(struct device *dev,
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
- .dma_supported = iommu_dma_supported,
.mapping_error = iommu_dma_mapping_error,
};
* then the IOMMU core will have already configured a group for this
* device, and allocated the default domain for that group.
*/
- if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
- return false;
+ if (!domain)
+ goto out_err;
+
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ goto out_err;
+
+ dev->archdata.dma_ops = &iommu_dma_ops;
}
- dev->archdata.dma_ops = &iommu_dma_ops;
return true;
+ out_err:
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ return false;
}
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+ #include <linux/mm.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
* linear mapping. Take care not to clip the kernel which may be
* high in memory.
*/
- memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
- ULLONG_MAX);
+ memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+ __pa_symbol(_end)), ULLONG_MAX);
if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
/* ensure that memstart_addr remains sufficiently aligned */
memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
memblock_mem_limit_remove_map(memory_limit);
- memblock_add(__pa(_text), (u64)(_end - _text));
+ memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
memblock_reserve(initrd_start, initrd_end - initrd_start);
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
void free_initmem(void)
{
- free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+ free_reserved_area(lm_alias(__init_begin),
+ lm_alias(__init_end),
0, "unused kernel");
/*
* Unmap the __init region but leave the VM area in place. This
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_PMEM_API if X86_64
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
config FIX_EARLYCON_MEM
def_bool y
-config DEBUG_RODATA
- def_bool y
-
config PGTABLE_LEVELS
int
default 4 if X86_64
def_bool y
config X86_MCE_INJECT
- depends on X86_MCE
+ depends on X86_MCE && X86_LOCAL_APIC
tristate "Machine check injector support"
---help---
Provide support for injecting machine checks for testing purposes.
theoretically possible, but the implementations are further
limited due to memory layouts.
- If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
- time. To enable it, boot with "kaslr" on the kernel command
- line (which will also disable hibernation).
-
If unsure, say N.
# Relocation on x86 needs some additional build support
return NULL;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
- node->mapping_offset);
+ node->mapping_offset + index * sizeof(*map));
/* Firmware bug! */
if (!map->output_reference) {
if (!(IORT_TYPE_MASK(parent->type) & type_mask))
return NULL;
- if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
+ if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
- *id_out = map[index].output_base;
+ *id_out = map->output_base;
return parent;
}
}
if (!iort_fwnode)
return NULL;
- ops = iommu_get_instance(iort_fwnode);
+ ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return NULL;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
- return PTR_ERR(pdev);
+ return -ENOMEM;
count = ops->iommu_count_resources(node);
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
+ #include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
continue;
/* Allocate an alias_prop with enough space for the stem */
- ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
if (!ap)
continue;
memset(ap, 0, sizeof(*ap) + len + 1);
return NULL;
}
+ /**
+ * of_find_last_cache_level - Find the level at which the last cache is
+ * present for the given logical cpu
+ *
+ * @cpu: cpu number (logical index) for which the last cache level is needed
+ *
+ * Returns the level at which the last cache is present. It is exactly the
+ * same as the total number of cache levels for the given logical cpu.
+ */
+ int of_find_last_cache_level(unsigned int cpu)
+ {
+ u32 cache_level = 0;
+ struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
+
+ while (np) {
+ prev = np;
+ of_node_put(np);
+ np = of_find_next_cache_node(np);
+ }
+
+ of_property_read_u32(prev, "cache-level", &cache_level);
+
+ return cache_level;
+ }
+
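A hypothetical caller for the new helper, e.g. topology setup code reporting the cache depth of each cpu:

	/* Hypothetical example: log the last cache level per possible cpu. */
	static int __init report_cache_levels(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			pr_info("cpu%u: last cache level %d\n", cpu,
				of_find_last_cache_level(cpu));
		return 0;
	}
	late_initcall(report_cache_levels);
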
/**
* of_graph_parse_endpoint() - parse common endpoint node properties
* @node: pointer to endpoint device_node
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
- CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_X86_RAPL_PREP,
CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_MIPS_SOC_PREPARE,
+ CPUHP_BP_PREPARE_DYN,
+ CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_PERF_X86_UNCORE_STARTING,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
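The new CPUHP_BP_PREPARE_DYN range reserves 20 dynamically allocated PREPARE-stage slots on the control CPU; a hypothetical registration sketch (the subsys_* names are illustrative):

	static int subsys_prepare_cpu(unsigned int cpu)
	{
		/* allocate per-cpu resources before the cpu comes up */
		return 0;
	}

	static int subsys_dead_cpu(unsigned int cpu)
	{
		/* release them once the cpu is dead */
		return 0;
	}

	static int __init subsys_init(void)
	{
		/* A positive return value is the dynamically allocated state. */
		int state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
					      "subsys:prepare",
					      subsys_prepare_cpu,
					      subsys_dead_cpu);
		return state < 0 ? state : 0;
	}
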
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
+ #ifndef lm_alias
+ #define lm_alias(x) __va(__pa_symbol(x))
+ #endif
+
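lm_alias() simply composes the two primitives above; the arm64 free_initmem() hunk earlier in this series is the canonical use, going through the linear alias of image symbols:

	/* From the arm64 free_initmem() change above: __init_begin and
	 * __init_end are image symbols, so free the region through its
	 * linear-map alias. */
	free_reserved_area(lm_alias(__init_begin), lm_alias(__init_end),
			   0, "unused kernel");
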
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
If unsure, say N.
+ config ARCH_HAS_DEBUG_VIRTUAL
+ bool
+
config DEBUG_VIRTUAL
bool "Debug VM translations"
- depends on DEBUG_KERNEL && X86
+ depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
help
Enable some costly sanity checks in virtual to page code. This can
catch mistakes with virt_to_page() and friends.
source "lib/Kconfig.kasan"
+config DEBUG_REFCOUNT
+ bool "Verbose refcount checks"
+ help
+ Say Y here if you want reference counters (refcount_t and kref) to
+ generate WARNs on dubious usage. Without this, refcount_t will still
+ be a saturating counter that avoids use-after-free by turning it into
+ a resource-leak denial-of-service.
+
+ Use of this option will increase kernel text size but will alert the
+ admin of potential abuse.
+
+ If in doubt, say "N".
+
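A minimal sketch of the dubious usage this option would flag, using the refcount_t API from <linux/refcount.h> (the function name is illustrative):

	static void refcount_misuse_example(void)
	{
		refcount_t ref = REFCOUNT_INIT(1);

		if (refcount_dec_and_test(&ref)) {
			/* last reference dropped: object would be freed here */
		}

		/* Increment-from-zero is a use-after-free pattern: with the
		 * checks enabled this WARNs, and the counter saturates rather
		 * than wrapping, degrading the bug to a resource leak. */
		refcount_inc(&ref);
	}
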
endmenu # "Memory Debugging"
config ARCH_HAS_KCOV
If unsure, say N.
-config TIMER_STATS
- bool "Collect kernel timers statistics"
- depends on DEBUG_KERNEL && PROC_FS
- help
- If you say Y here, additional code will be inserted into the
- timer routines to collect statistics about kernel timers being
- reprogrammed. The statistics can be read from /proc/timer_stats.
- The statistics collection is started by writing 1 to /proc/timer_stats,
- writing 0 stops it. This feature is useful to collect information
- about timer usage patterns in kernel and userspace. This feature
- is lightweight if enabled in the kernel config but not activated
- (it defaults to deactivated on bootup and will only be activated
- if some application like powertop activates it explicitly).
-
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
Say M if you want these torture tests to build as a module.
Say N if you are unsure.
+config WW_MUTEX_SELFTEST
+ tristate "Wait/wound mutex selftests"
+ help
+ This option provides a kernel module that runs tests on the
+ struct ww_mutex locking API.
+
+ It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+ with this test harness.
+
+ Say M if you want these self tests to build as a module.
+ Say N if you are unsure.
+
endmenu # lock debugging
config TRACE_IRQFLAGS
config RCU_TRACE
bool "Enable tracing for RCU"
depends on DEBUG_KERNEL
+ default y if TREE_RCU
select TRACE_CLOCK
help
This option provides tracing in RCU which presents stats
tristate "Perform selftest on hash functions"
default n
help
- Enable this option to test the kernel's integer (<linux/hash,h>)
- and string (<linux/stringhash.h>) hash functions on boot
- (or module load).
+ Enable this option to test the kernel's integer (<linux/hash.h>),
+ string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+ hash functions on boot (or module load).
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
+config TEST_PARMAN
+ tristate "Perform selftest on priority array manager"
+ default n
+ depends on PARMAN
+ help
+ Enable this option to test priority array manager on boot
+ (or module load).
+
+ If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT