void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_free(tsk);
}
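
Since the combined has_vector() || has_xtheadvector() guard recurs throughout the series, a wrapper along these lines could factor it out. This is a sketch only, not code from the patches; it assumes has_xtheadvector() resolves to a cheap vendor-extension test as in the hunks here.

/* Sketch only: one predicate for standard V and the T-Head vendor extension. */
static inline bool riscv_has_any_vector(void)
{
	return has_vector() || has_xtheadvector();
}
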
p->thread.s[0] = 0;
}
p->thread.riscv_v_flags = 0;
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
riscv_v_thread_alloc(p);
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static struct ctl_table tagged_addr_sysctl_table[] = {
+static const struct ctl_table tagged_addr_sysctl_table[] = {
{
.procname = "tagged_addr_disabled",
.mode = 0644,
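
Constifying the table is possible because register_sysctl() takes the table through a const pointer. A minimal self-contained sketch of the same pattern; the knob name and backing variable are invented for illustration:

#include <linux/init.h>
#include <linux/sysctl.h>

static int example_knob;	/* hypothetical backing variable */

static const struct ctl_table example_sysctl_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(example_knob),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static int __init example_sysctl_init(void)
{
	/* register_sysctl() returns NULL on failure. */
	if (!register_sysctl("abi", example_sysctl_table))
		return -EINVAL;
	return 0;
}
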
{
unsigned long this_vsize;
- /* There are 32 vector registers with vlenb length. */
+ /*
+ * There are 32 vector registers with vlenb length.
+ *
+ * If the thead,vlenb property was provided by the firmware, use that
+ * instead of probing the CSRs.
+ */
+ if (thead_vlenb_of) {
+ 	riscv_v_vsize = thead_vlenb_of * 32;
+ 	return 0;
+ }
+
riscv_v_enable();
this_vsize = csr_read(CSR_VLENB) * 32;
riscv_v_disable();
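
As a worked example with assumed numbers: a T-Head core with VLEN = 128 bits reports thead,vlenb = 16 bytes, so riscv_v_vsize = 16 * 32 = 512 bytes of per-task save area, the same result the csr_read(CSR_VLENB) * 32 probe gives on a standard-V core with the same VLEN.
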
void __init riscv_v_setup_ctx_cache(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
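
The hunk is cut off mid-call; for orientation, the cache is created with kmem_cache_create_usercopy() so the whole per-task vector context can be whitelisted for copies to and from user space. A sketch of such a call, with the alignment and flags chosen as assumptions rather than quoted from the tree:

riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
						 riscv_v_vsize, 16, SLAB_PANIC,
						 0, riscv_v_vsize, NULL);
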
u32 __user *epc = (u32 __user *)regs->epc;
u32 insn = (u32)regs->badaddr;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return false;
/* Do not handle if V is not supported, or disabled */
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return;
next = riscv_v_ctrl_get_next(tsk);
long riscv_v_vstate_ctrl_get_current(void)
{
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
bool inherit;
int cur, next;
- if (!has_vector())
+ if (!(has_vector() || has_xtheadvector()))
return -EINVAL;
if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK)
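
These helpers sit behind the PR_RISCV_V_GET_CONTROL/PR_RISCV_V_SET_CONTROL prctls. A minimal user-space sketch of driving them; error handling is elided and the constants come from <linux/prctl.h> on kernels with V support:

#include <sys/prctl.h>

/* Enable V on first use for this task and let children inherit the setting. */
static int vstate_ctrl_enable(void)
{
	return prctl(PR_RISCV_V_SET_CONTROL,
		     PR_RISCV_V_VSTATE_CTRL_ON | PR_RISCV_V_VSTATE_CTRL_INHERIT);
}
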
#ifdef CONFIG_SYSCTL
-static struct ctl_table riscv_v_default_vstate_table[] = {
+static const struct ctl_table riscv_v_default_vstate_table[] = {
{
.procname = "riscv_v_default_allow",
.data = &riscv_v_implicit_uacc,
static int __init riscv_v_sysctl_init(void)
{
- if (has_vector())
+ if (has_vector() || has_xtheadvector())
	if (!register_sysctl("abi", riscv_v_default_vstate_table))
		return -EINVAL;
return 0;
static int __init riscv_v_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
-static int riscv_v_init(void)
+static int __init riscv_v_init(void)
{
return riscv_v_sysctl_init();
}
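
The added __init is safe because riscv_v_init() only runs once from the boot-time initcall chain; assuming the usual registration (core_initcall() in mainline), the annotation lets both it and riscv_v_sysctl_init() be discarded after boot:

core_initcall(riscv_v_init);
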
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
+#include <asm/sparsemem.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)
+
+unsigned long vmemmap_start_pfn __ro_after_init;
+EXPORT_SYMBOL(vmemmap_start_pfn);
+#endif
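
To make the alignment concrete (illustrative values): with SECTION_SIZE_BITS = 27, the rv64 default, VMEMMAP_ADDR_ALIGN is 128 MiB, so a phys_ram_base of 0x80200000 rounds down to 0x80000000 and vmemmap_start_pfn becomes 0x80000; the struct page array is then anchored at a section-aligned PFN instead of at the raw start of DRAM.
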
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
- if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
+ }
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we
*/
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
- memblock_cap_memory_range(phys_ram_base,
- 			  max_mapped_addr - phys_ram_base);
+ if (memblock_end_of_DRAM() > max_mapped_addr) {
+ 	memblock_cap_memory_range(phys_ram_base,
+ 				  max_mapped_addr - phys_ram_base);
+ 	pr_warn("Physical memory overflows the linear mapping size: region above %pa removed",
+ 		&max_mapped_addr);
+ }
}
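
With illustrative numbers, not taken from the patch: if the linear map covers 1 GiB from phys_ram_base = 0x80000000, then max_mapped_addr = 0xC0000000, and a DRAM bank reaching 0xC8000000 is trimmed back to 0xC0000000 with the warning printed once. Previously the memblock_cap_memory_range() call ran unconditionally, even when nothing overflowed.
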
/*
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
phys_ram_base = CONFIG_PHYS_RAM_BASE;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
return;
}
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
pmd_clear(pmd);
}
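
The rename reflects the consolidation of the per-level pagetable_pte_dtor()/pagetable_pmd_dtor() helpers into a single pagetable_dtor(); for ordinary page tables the behavior is unchanged.
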
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
{
struct page *page = pud_page(*pud);
struct ptdesc *ptdesc = page_ptdesc(page);
return;
}
- pagetable_pmd_dtor(ptdesc);
+ if (!is_vmemmap)
+ 	pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
if (pgtable_l4_enabled)
- free_pmd_table(pmd_base, pudp);
+ free_pmd_table(pmd_base, pudp, is_vmemmap);
}
}
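
The is_vmemmap flag exists because vmemmap page tables are allocated early and never pass through the page-table constructor, so invoking pagetable_dtor() on them would tear down state that was never initialized. A condensed view of how the flag threads through the teardown path, using only names from the hunks above:

/*
 * Condensed teardown flow (vmemmap tables skip the dtor):
 *
 *   remove_pud_mapping(..., is_vmemmap, altmap)
 *     remove_pmd_mapping(..., is_vmemmap, altmap)
 *     free_pmd_table(pmd_base, pudp, is_vmemmap)
 *       if (!is_vmemmap)
 *               pagetable_dtor(ptdesc);
 */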