config HAVE_IMA_KEXEC
bool
+config ARCH_HAS_SUBPAGE_FAULTS
+ bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
+
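As a rough sketch of what this option implies (not the actual arm64 code; the helper name and the 16-byte granule below are assumptions made for illustration), an architecture selecting it provides a probe that walks a user range in sub-page steps and reports how much of it could not be accessed:

/*
 * Illustrative sketch only: walk a user range at an assumed 16-byte
 * granule; a tag/permission mismatch makes get_user() fault.
 */
static size_t probe_subpage_sketch(const char __user *uaddr, size_t size)
{
        size_t done;
        char c;

        for (done = 0; done < size; done += 16) {       /* granule assumed */
                if (get_user(c, uaddr + done))
                        return size - done;
        }
        return 0;
}
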
config HOTPLUG_SMT
bool
depends on MODULES
depends on HAVE_KPROBES
select KALLSYMS
+ select TASKS_RCU if PREEMPTION
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
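The help text above is cut short by the excerpt; for orientation, registering a probe with the standard kprobes API looks roughly like this (the target symbol is only an example):

#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit: %s at %px\n", p->symbol_name, p->addr);
        return 0;
}

static struct kprobe kp = {
        .symbol_name = "do_sys_openat2",        /* example target */
        .pre_handler = example_pre,
};

/* module init: register_kprobe(&kp);  module exit: unregister_kprobe(&kp); */
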
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
depends on CC_HAS_ASM_GOTO
+ select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
This option enables a transparent branch optimization that
makes certain almost-always-true or almost-always-false branch
config CFI_CLANG
bool "Use Clang's Control Flow Integrity (CFI)"
depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG
- # Clang >= 12:
- # - https://bugs.llvm.org/show_bug.cgi?id=46258
- # - https://bugs.llvm.org/show_bug.cgi?id=47479
- depends on CLANG_VERSION >= 120000
+ depends on CLANG_VERSION >= 140000
select KALLSYMS
help
This option enables Clang's forward-edge Control Flow Integrity
Modules only use ELF REL relocations. Modules with ELF RELA
relocations will give an error.
+config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
+ bool
+ help
+ For architectures like powerpc/32 which have constraints on module
+ allocation and need to allocate module data outside of the module area.
+
config HAVE_IRQ_EXIT_ON_IRQ_STACK
bool
help
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
- depends on !PPC_64K_PAGES
depends on PAGE_SIZE_LESS_THAN_256KB
config PAGE_SIZE_LESS_THAN_256KB
def_bool y
- depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB
# This allows to use a set of generic functions to determine mmap base
depends on MMU
select ARCH_HAS_ELF_RANDOMIZE
+config HAVE_OBJTOOL
+ bool
+
+config HAVE_JUMP_LABEL_HACK
+ bool
+
+config HAVE_NOINSTR_HACK
+ bool
+
+config HAVE_NOINSTR_VALIDATION
+ bool
+
config HAVE_STACK_VALIDATION
bool
help
- Architecture supports the 'objtool check' host tool command, which
- performs compile-time stack metadata validation.
+ Architecture supports objtool compile-time frame pointer rule
+ validation.
config HAVE_RELIABLE_STACKTRACE
bool
config HAVE_STATIC_CALL_INLINE
bool
depends on HAVE_STATIC_CALL
+ select OBJTOOL
config HAVE_PREEMPT_DYNAMIC
bool
#endif /* CONFIG_COMPAT */
#ifndef CONFIG_ARM64_FORCE_52BIT
- #define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
- DEFAULT_MAP_WINDOW)
+ #define arch_get_mmap_end(addr, len, flags) \
+ (((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)
#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
enum vec_type {
ARM64_VEC_SVE = 0,
+ ARM64_VEC_SME,
ARM64_VEC_MAX,
};
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
+ void *za_state; /* ZA register, if any */
unsigned int vl[ARM64_VEC_MAX]; /* vector length */
unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
u64 mte_ctrl;
#endif
u64 sctlr_user;
+ u64 svcr;
+ u64 tpidr2_el0;
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
return thread_get_vl(thread, ARM64_VEC_SVE);
}
+static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
+{
+ return thread_get_vl(thread, ARM64_VEC_SME);
+}
+
+static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
+{
+ if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
+ return thread_get_sme_vl(thread);
+ else
+ return thread_get_sve_vl(thread);
+}
+
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
unsigned long vl);
return task_get_vl(task, ARM64_VEC_SVE);
}
+static inline unsigned int task_get_sme_vl(const struct task_struct *task)
+{
+ return task_get_vl(task, ARM64_VEC_SME);
+}
+
static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
task_set_vl(task, ARM64_VEC_SVE, vl);
*/
#include <asm/fpsimd.h>
-/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
+/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+#define SME_SET_VL(arg) sme_set_current_vl(arg)
+#define SME_GET_VL() sme_get_current_vl()
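For context, userspace reaches these hooks through prctl(); a minimal sketch, assuming the PR_SME_* constants from the SME uapi headers (vector lengths are in bytes, as with SVE):

#include <sys/prctl.h>
#include <stdio.h>

int main(void)
{
        /* request a 32-byte (256-bit) streaming vector length */
        if (prctl(PR_SME_SET_VL, 32) < 0)
                perror("PR_SME_SET_VL");
        else
                printf("SME VL config: %#lx\n", (unsigned long)prctl(PR_SME_GET_VL));
        return 0;
}
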
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
* of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
- _info.high; \
-})
+/*
+ * The top of the current task's task stack
+ */
+#define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE)
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
# Please keep this list sorted alphabetically.
#
select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
- select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
+ select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
- select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KASAN if PPC_RADIX_MMU
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_DESCRIPTORS if PPC64 && !CPU_LITTLE_ENDIAN
+ select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
bool "Build a relocatable kernel"
depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
- select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there are no
endchoice
+ config PAGE_SIZE_4KB
+ def_bool y
+ depends on PPC_4K_PAGES
+
+ config PAGE_SIZE_16KB
+ def_bool y
+ depends on PPC_16K_PAGES
+
+ config PAGE_SIZE_64KB
+ def_bool y
+ depends on PPC_64K_PAGES
+
+ config PAGE_SIZE_256KB
+ def_bool y
+ depends on PPC_256K_PAGES
+
config PPC_PAGE_SHIFT
int
default 18 if PPC_256K_PAGES
ifdef CONFIG_PPC64
ifndef CONFIG_CC_IS_CLANG
- cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
- cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
- aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
- aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
+ cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+ cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc)
+ aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+ aflags-$(CONFIG_PPC64_ELF_ABI_V2) += -mabi=elfv2
endif
endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
ifndef CONFIG_CC_IS_CLANG
- ifdef CONFIG_CPU_LITTLE_ENDIAN
+ ifdef CONFIG_PPC64_ELF_ABI_V2
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
ifdef CONFIG_CPU_BIG_ENDIAN
CHECKFLAGS += -D__BIG_ENDIAN__
else
- CHECKFLAGS += -D__LITTLE_ENDIAN__ -D_CALL_ELF=2
+ CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
ifdef CONFIG_476FPE_ERR46
PHONY += install
install:
- sh -x $(srctree)/$(boot)/install.sh "$(KERNELRELEASE)" vmlinux \
- System.map "$(INSTALL_PATH)"
+ $(call cmd,install)
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
$(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
- BOOTCFLAGS += -m64
+ ifdef CONFIG_CPU_LITTLE_ENDIAN
+ BOOTCFLAGS += -m64 -mcpu=powerpc64le
else
- BOOTCFLAGS += -m32
+ BOOTCFLAGS += -m64 -mcpu=powerpc64
+ endif
+ else
+ BOOTCFLAGS += -m32 -mcpu=powerpc
endif
BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
BOOTCFLAGS += -mbig-endian
else
BOOTCFLAGS += -mlittle-endian
+ endif
+ ifdef CONFIG_PPC64_ELF_ABI_V2
BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
clean-kernel-base := vmlinux.strip vmlinux.bin
clean-kernel := $(addsuffix .gz,$(clean-kernel-base))
clean-kernel += $(addsuffix .xz,$(clean-kernel-base))
-# If not absolute clean-files are relative to $(obj).
-clean-files += $(addprefix $(objtree)/, $(clean-kernel))
+# clean-files are relative to $(obj).
+clean-files += $(addprefix ../../../, $(clean-kernel))
WRAPPER_OBJDIR := /usr/lib/kernel-wrapper
WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
/*
* Common bits between hash and Radix page table
*/
-#define _PAGE_BIT_SWAP_TYPE 0
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+ #define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
+ #define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
+ #define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
* Don't have overlapping bits with _PAGE_HPTEFLAGS \
* We filter HPTEFLAGS on set_pte. \
*/ \
- BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE); \
} while (0)
#define SWP_TYPE_BITS 5
-#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
- & ((1UL << SWP_TYPE_BITS) - 1))
+#define SWP_TYPE_MASK ((1UL << SWP_TYPE_BITS) - 1)
+#define __swp_type(x) ((x).val & SWP_TYPE_MASK)
#define __swp_offset(x) (((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << _PAGE_BIT_SWAP_TYPE) \
- | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
+ (type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
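A quick worked example of the new layout, using only the macros above: the swap type now sits in the low SWP_TYPE_BITS of the entry and the offset in the RPN field, so a round trip behaves as the comments indicate:

/* purely illustrative round trip with the macros defined above */
swp_entry_t ent = __swp_entry(3, 0x1234);

/* __swp_type(ent)   == 3       (low 5 bits)               */
/* __swp_offset(ent) == 0x1234  (taken from the RPN field) */
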
/*
* swp_entry_t must be independent of pte bits. We build a swp_entry_t from
* swap type and offset we get from swap and convert that to pte to find a
#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_SOFT_DIRTY
#else
#define _PAGE_SWP_SOFT_DIRTY 0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_NON_IDEMPOTENT
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
+}
+
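These helpers give the core swap code a per-entry "exclusive" marker; reusing _PAGE_NON_IDEMPOTENT is safe here because a swap PTE is never a real mapping, so the cache-attribute meaning of that bit does not apply. The round trip is simply:

/* illustrative: set, test and clear the exclusive bit on a swap pte */
pte = pte_swp_mkexclusive(pte);         /* pte_swp_exclusive(pte) != 0 */
pte = pte_swp_clear_exclusive(pte);     /* pte_swp_exclusive(pte) == 0 */
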
static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
/*
unsigned long addr,
unsigned long len)
{
- if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
return slice_is_hugepage_only_range(mm, addr, len);
return 0;
}
}
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_hugetlb_page(vma, addr);
+ return pte;
}
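Returning the old entry lets callers act on its state after the flush; a simplified, hypothetical caller (not taken from this patch) could preserve dirtiness like so:

/* hypothetical use of the returned entry */
pte_t old = huge_ptep_clear_flush(vma, addr, ptep);

if (huge_pte_dirty(old))
        set_page_dirty(page);
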
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
#ifdef CONFIG_PPC_SVM
+ #include <asm/reg.h>
+
static inline bool is_secure_guest(void)
{
return mfmsr() & MSR_S;
}
-void __init svm_swiotlb_init(void);
-
void dtl_cache_ctor(void *addr);
#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
return false;
}
-static inline void svm_swiotlb_init(void) {}
-
#define get_dtl_cache_ctor() NULL
#endif /* CONFIG_PPC_SVM */
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
+ #include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
- #include <asm/prom.h>
#include <asm/firmware.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
}
#endif /* CONFIG_NONSTATIC_KERNEL */
-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
- unsigned long offset, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
- return -EFAULT;
- } else
- memcpy(buf, (vaddr + offset), csize);
-
- return csize;
-}
-
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
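With the iov_iter conversion, callers hand in a prepared iterator instead of a buffer/userbuf pair. A minimal sketch that reads one old-kernel page into a kernel buffer (the direction constant is spelled READ here, as in kernels of this vintage):

#include <linux/uio.h>

struct kvec kvec = { .iov_base = buf, .iov_len = PAGE_SIZE };
struct iov_iter iter;

iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
/* up to PAGE_SIZE bytes of page 'pfn' land in buf via copy_to_iter() */
copy_oldmem_page(&iter, pfn, PAGE_SIZE, 0);
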
#include <linux/cma.h>
#include <linux/hugetlb.h>
#include <linux/debugfs.h>
+ #include <linux/of.h>
+ #include <linux/of_fdt.h>
#include <asm/page.h>
- #include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
* The total size of fadump reserved memory covers for boot memory size
* + cpu data size + hpte size and metadata.
* Initialize only the area equivalent to boot memory size for CMA use.
- * The reamining portion of fadump reserved memory will be not given
- * to CMA and pages for thoes will stay reserved. boot memory size is
+ * The remaining portion of fadump reserved memory will not be given
+ * to CMA and pages for those will stay reserved. boot memory size is
* aligned per CMA requirement to satisfy cma_init_reserved_mem() call.
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
size += fw_dump.cpu_state_data_size;
size += fw_dump.hpte_region_size;
+ /*
+ * Account for pagesize alignment of boot memory area destination address.
+ * This facilitates mmap reading of the first kernel's memory.
+ */
+ size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
size += sizeof(struct elfhdr); /* ELF core header.*/
else
ppc_save_regs(&fdh->regs);
- fdh->online_mask = *cpu_online_mask;
+ fdh->cpu_mask = *cpu_online_mask;
/*
* If we came in via system reset, wait a while for the secondary
* FIXME: How do I get PID? Do I really need it?
* prstatus.pr_pid = ????
*/
- elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
sizeof(struct fadump_memory_range));
return 0;
}
-
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
- if ((start + size) == base)
+ /*
+ * Boot memory area needs separate PT_LOAD segment(s) as it
+ * is moved to a different location at the time of crash.
+ * So, fold only if the region is not boot memory area.
+ */
+ if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
- #if defined(_CALL_ELF)
- elf->e_flags = _CALL_ELF;
- #else
- elf->e_flags = 0;
- #endif
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ elf->e_flags = 2;
+ else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ elf->e_flags = 1;
+ else
+ elf->e_flags = 0;
+
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
fdh->elfcorehdr_addr = addr;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+ /*
+ * When LPAR is terminated by PHYP, ensure all possible CPUs'
+ * register data is processed while exporting the vmcore.
+ */
+ fdh->cpu_mask = *cpu_possible_mask;
return addr;
}
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *mem_ranges;
- struct fadump_memory_range tmp_range;
u64 base, size;
int i, j, idx;
if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
}
- if (idx != i) {
- tmp_range = mem_ranges[idx];
- mem_ranges[idx] = mem_ranges[i];
- mem_ranges[i] = tmp_range;
- }
+ if (idx != i)
+ swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
}
/*
* Use subsys_initcall_sync() here because there is dependency with
- * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization
- * is done before regisering with f/w.
+ * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization
+ * is done before registering with f/w.
*/
subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
+#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <misc/cxl-base.h>
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * the allocated page with single fragment
* count.
*/
if (likely(!mm->context.pmd_frag)) {
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ unsigned long prot = pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+ if (vm_flags & VM_SAO)
+ prot |= _PAGE_SAO;
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ prot |= vmflag_to_pte_pkey_bits(vm_flags);
+#endif
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
#include <linux/suspend.h>
#include <linux/dma-direct.h>
+#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
+ #include <asm/ftrace.h>
+ #include <asm/code-patching.h>
#include <mm/mmu_decl.h>
* back to top-down.
*/
memblock_set_bottom_up(true);
- if (is_secure_guest())
- svm_swiotlb_init();
- else
- swiotlb_init(0);
+ swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
+ static_branch_enable(&init_mem_is_free);
free_initmem_default(POISON_FREE_INITMEM);
+ ftrace_free_init_tramp();
}
/*
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
+ #include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
- #include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
- #ifdef CONFIG_KEXEC_CORE
- static void pSeries_machine_kexec(struct kimage *image)
- {
- if (firmware_has_feature(FW_FEATURE_SET_MODE))
- pseries_disable_reloc_on_exc();
-
- default_machine_kexec(image);
- }
- #endif
-
#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
*/
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
if (resno >= num_res)
- return 0; /* or an errror */
+ return 0; /* or an error */
i = START_OF_ENTRIES + NEXT_ENTRY * resno;
switch (value) {
if (!pdev->is_physfn)
return;
- /*Firmware must support open sriov otherwise dont configure*/
+ /*Firmware must support open sriov otherwise don't configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (indexes)
of_pci_parse_iov_addrs(pdev, indexes);
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
- if (swiotlb_force == SWIOTLB_FORCE)
- ppc_swiotlb_enable = 1;
}
static void pseries_panic(char *str)
.machine_check_exception = pSeries_machine_check_exception,
.machine_check_log_err = pSeries_machine_check_log_err,
#ifdef CONFIG_KEXEC_CORE
- .machine_kexec = pSeries_machine_kexec,
+ .machine_kexec = pseries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
* Called under mmap_write_lock(mm).
*/
- #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
- info.high_limit = arch_get_mmap_end(addr);
+ info.high_limit = arch_get_mmap_end(addr, len, flags);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = current->mm->mmap_base;
- info.high_limit = arch_get_mmap_end(addr);
+ info.high_limit = arch_get_mmap_end(addr, len, flags);
addr = vm_unmapped_area(&info);
}
return addr;
}
- static unsigned long
- hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long
+ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
if (len & ~huge_page_mask(h))
return -EINVAL;
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
}
+
+ #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+ static unsigned long
+ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+ {
+ return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+ }
#endif
static size_t
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
return -EINVAL;
}
static void
-hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
+hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
+ zap_flags_t zap_flags)
{
struct vm_area_struct *vma;
}
unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
- NULL);
+ NULL, zap_flags);
}
}
mutex_lock(&hugetlb_fault_mutex_table[hash]);
hugetlb_vmdelete_list(&mapping->i_mmap,
index * pages_per_huge_page(h),
- (index + 1) * pages_per_huge_page(h));
+ (index + 1) * pages_per_huge_page(h),
+ ZAP_FLAG_DROP_MARKER);
i_mmap_unlock_write(mapping);
}
i_mmap_lock_write(mapping);
i_size_write(inode, offset);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
- hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
+ hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
+ ZAP_FLAG_DROP_MARKER);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, offset, LLONG_MAX);
}
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap,
- hole_start >> PAGE_SHIFT,
- hole_end >> PAGE_SHIFT);
+ hole_start >> PAGE_SHIFT,
+ hole_end >> PAGE_SHIFT, 0);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, hole_start, hole_end);
inode_unlock(inode);
if (sbinfo->spool) {
long free_pages;
- spin_lock(&sbinfo->spool->lock);
+ spin_lock_irq(&sbinfo->spool->lock);
buf->f_blocks = sbinfo->spool->max_hpages;
free_pages = sbinfo->spool->max_hpages
- sbinfo->spool->used_hpages;
buf->f_bavail = buf->f_bfree = free_pages;
- spin_unlock(&sbinfo->spool->lock);
+ spin_unlock_irq(&sbinfo->spool->lock);
buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes;
}
struct vm_area_struct *new_vma,
unsigned long old_addr, unsigned long new_addr,
unsigned long len);
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
unsigned long *, unsigned long *, long, unsigned int,
int *);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
+ unsigned long, unsigned long, struct page *,
+ zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long dst_addr,
unsigned long src_addr,
enum mcopy_atomic_mode mode,
- struct page **pagep);
+ struct page **pagep,
+ bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
- struct mm_struct *src, struct vm_area_struct *vma)
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
BUG();
return 0;
unsigned long dst_addr,
unsigned long src_addr,
enum mcopy_atomic_mode mode,
- struct page **pagep)
+ struct page **pagep,
+ bool wp_copy)
{
BUG();
return 0;
static inline unsigned long hugetlb_change_protection(
struct vm_area_struct *vma, unsigned long address,
- unsigned long end, pgprot_t newprot)
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
{
return 0;
}
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct page *ref_page,
+ zap_flags_t zap_flags)
{
BUG();
}
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+ unsigned long
+ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
/*
* hugetlb page specific state flags. These flags are located in page.private
* of the hugetlb head page. Functions created via the below macros should be
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
- unsigned int nr_free_vmemmap_pages;
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ unsigned int optimize_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
pte_t *ptep, pte_t pte, unsigned long sz)
{
}
+
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
* x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
- __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
- __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ [VM_NONE] = __P000,
+ [VM_READ] = __P001,
+ [VM_WRITE] = __P010,
+ [VM_WRITE | VM_READ] = __P011,
+ [VM_EXEC] = __P100,
+ [VM_EXEC | VM_READ] = __P101,
+ [VM_EXEC | VM_WRITE] = __P110,
+ [VM_EXEC | VM_WRITE | VM_READ] = __P111,
+ [VM_SHARED] = __S000,
+ [VM_SHARED | VM_READ] = __S001,
+ [VM_SHARED | VM_WRITE] = __S010,
+ [VM_SHARED | VM_WRITE | VM_READ] = __S011,
+ [VM_SHARED | VM_EXEC] = __S100,
+ [VM_SHARED | VM_EXEC | VM_READ] = __S101,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __S110,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
};
-#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
-{
- return prot;
-}
-#endif
-
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
- pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
- pgprot_val(arch_vm_get_page_prot(vm_flags)));
-
- return arch_filter_pgprot(ret);
+ return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);
+#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
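With the arch hooks gone, the generic helper is a pure table lookup into protection_map; for example (resolved directly from the initializer above, e.g. on the mmap() path):

pgprot_t private_rw = vm_get_page_prot(VM_READ | VM_WRITE);             /* __P011 */
pgprot_t shared_rx  = vm_get_page_prot(VM_SHARED | VM_READ | VM_EXEC);  /* __S101 */
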
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
end, prev->vm_pgoff, NULL, prev);
if (err)
return NULL;
- khugepaged_enter_vma_merge(prev, vm_flags);
+ khugepaged_enter_vma(prev, vm_flags);
return prev;
}
}
if (err)
return NULL;
- khugepaged_enter_vma_merge(area, vm_flags);
+ khugepaged_enter_vma(area, vm_flags);
return area;
}
* the same as 'old', the other will be the new one that is trying
* to share the anon_vma.
*
- * NOTE! This runs with mm_sem held for reading, so it is possible that
+ * NOTE! This runs with mmap_lock held for reading, so it is possible that
* the anon_vma of 'old' is concurrently in the process of being set up
* by another page fault trying to merge _that_. But that's ok: if it
* is being set up, that automatically means that it will be a singleton
*
* We also make sure that the two vma's are compatible (adjacent,
* and with the same memory policies). That's all stable, even with just
- * a read lock on the mm_sem.
+ * a read lock on the mmap_lock.
*/
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
}
vma_link(mm, vma, prev, rb_link, rb_parent);
+
+ /*
+ * vma_merge() calls khugepaged_enter_vma() for the merge case; the
+ * call below covers the non-merge case.
+ */
+ khugepaged_enter_vma(vma, vma->vm_flags);
+
/* Once vma denies write, undo our temporary denial count */
unmap_writable:
if (file && vm_flags & VM_SHARED)
*
* This function "knows" that -ENOMEM has the bits set.
*/
- #ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
- arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
info.align_offset = 0;
return vm_unmapped_area(&info);
}
+
+ #ifndef HAVE_ARCH_UNMAPPED_AREA
+ unsigned long
+ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+ {
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
#endif
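The point of splitting out generic_get_unmapped_area(): an architecture that defines HAVE_ARCH_UNMAPPED_AREA can apply its own policy first and still fall back to the common code. A purely hypothetical override (the boundary check and constant are assumptions, not from any in-tree port):

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        /* hypothetical: ignore address hints above an arch-specific limit */
        if (addr && addr > ARCH_EXAMPLE_LIMIT)  /* assumed constant */
                addr = 0;

        return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
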
/*
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
- #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
- arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
{
struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
- const unsigned long mmap_end = arch_get_mmap_end(addr);
+ const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
/* requested length too big for entire address space */
if (len > mmap_end - mmap_min_addr)
return addr;
}
+
+ #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+ unsigned long
+ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+ {
+ return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ }
#endif
unsigned long
return -ENOMEM;
/* mlock limit tests */
- if (vma->vm_flags & VM_LOCKED) {
- unsigned long locked;
- unsigned long limit;
- locked = mm->locked_vm + grow;
- limit = rlimit(RLIMIT_MEMLOCK);
- limit >>= PAGE_SHIFT;
- if (locked > limit && !capable(CAP_IPC_LOCK))
- return -ENOMEM;
- }
+ if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT))
+ return -ENOMEM;
/* Check to ensure the stack will not grow into a hugetlb-only region */
new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
}
}
anon_vma_unlock_write(vma->anon_vma);
- khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ khugepaged_enter_vma(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
}
}
anon_vma_unlock_write(vma->anon_vma);
- khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ khugepaged_enter_vma(vma, vma->vm_flags);
validate_mm(mm);
return error;
}
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(mmap_read_trylock(mm));
+ mmap_assert_write_locked(mm);
mutex_lock(&mm_all_locks_mutex);
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(mmap_read_trylock(mm));
+ mmap_assert_write_locked(mm);
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
#include <linux/uaccess.h>
#include "internal.h"
+#include "swap.h"
/**
* kfree_const - conditionally free memory
#endif
}
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
+ }
+
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
+}
+
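Typical use, as in the brk randomization just below; a one-line sketch (the 32MB window is assumed for illustration):

/* pick a page-aligned brk base at random within 32MB above mm->brk */
unsigned long new_brk = randomize_page(mm->brk, SZ_32M);
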
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
- unsigned long arch_randomize_brk(struct mm_struct *mm)
+ unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
/* Is the current task 32bit ? */
if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())