*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
#endif
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
#ifndef __ASSEMBLY__
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
 */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_7_0 0x81
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
#else
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
return x - PHYS_OFFSET + PAGE_OFFSET;
}
-#endif
-#endif
-#endif /* __ASSEMBLY__ */
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
+#endif
-#ifndef __ASSEMBLY__
-
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
-
/*
* These are *only* valid on the kernel direct mapped RAM memory.
* Note: Drivers should NOT use these. They are the wrong
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
- && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ && pfn_valid(virt_to_pfn(kaddr)))
#endif
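The net effect of the memory.h hunks above is that PHYS_OFFSET is now derived from a page frame number (__pv_phys_pfn_offset) and that virt-to-pfn conversions go through the new virt_to_pfn() helper instead of open-coding __pa(kaddr) >> PAGE_SHIFT. As a minimal, purely illustrative sketch of that arithmetic (PAGE_OFFSET, PHYS_OFFSET and the sample address are assumed values, not taken from the patch):

    /* Illustrative userspace mock-up of the virt_to_pfn() arithmetic. */
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_OFFSET     0xC0000000UL   /* assumed kernel direct-map base */
    #define PHYS_OFFSET     0x40000000UL   /* assumed start of RAM           */
    #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)

    static unsigned long virt_to_pfn(unsigned long kaddr)
    {
            return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
    }

    int main(void)
    {
            unsigned long kaddr = PAGE_OFFSET + 0x100000; /* 1 MiB into lowmem */

            /* Prints 0x40100, i.e. physical address 0x40100000. */
            printf("pfn = 0x%lx\n", virt_to_pfn(kaddr));
            return 0;
    }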
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
.macro pgtbl, rd, phys
- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+ add \rd, \phys, #TEXT_OFFSET
+ sub \rd, \rd, #PG_DIR_SIZE
.endm
/*
and r3, r3, #0xf @ extract VMSA support
cmp r3, #5 @ long-descriptor translation table format?
THUMB( it lo ) @ force fixup-able long branch encoding
- blo __error_p @ only classic page table format
+ blo __error_lpae @ only classic page table format
#endif
#ifndef CONFIG_XIP_KERNEL
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
- add r6, r6, r3 @ adjust __pv_phys_offset address
+ add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
 add r7, r7, r3 @ adjust __pv_offset address
- str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset
+ mov r0, r8, lsr #12 @ convert to PFN
+ str r0, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
1: .long .
.long __pv_table_begin
.long __pv_table_end
-2: .long __pv_phys_offset
+2: .long __pv_phys_pfn_offset
.long __pv_offset
.text
ENDPROC(fixup_pv_table)
.data
- .globl __pv_phys_offset
- .type __pv_phys_offset, %object
-__pv_phys_offset:
- .quad 0
- .size __pv_phys_offset, . -__pv_phys_offset
+ .globl __pv_phys_pfn_offset
+ .type __pv_phys_pfn_offset, %object
+__pv_phys_pfn_offset:
+ .word 0
+ .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
.globl __pv_offset
.type __pv_offset, %object
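The head.S hunks above follow the same scheme: the computed physical start is stored as a PFN (a single .word, even with LPAE) rather than as the old 64-bit __pv_phys_offset, and the virt/phys delta patched into the add/sub stubs must be 16 MiB aligned so it fits the 8-bit rotated immediate selected by __PV_BITS_31_24. A rough C sketch of those two computations, with assumed addresses that are not taken from the patch:

    /* Illustrative only; the addresses are assumptions. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long phys_start  = 0x80000000ULL; /* runtime PHYS_OFFSET      */
            unsigned long      page_offset = 0xC0000000UL;  /* compile-time PAGE_OFFSET */

            /* What head.S now stores in __pv_phys_pfn_offset (lsr #12). */
            unsigned long pfn_offset = (unsigned long)(phys_start >> 12);

            /* The delta patched into the stubs; low 24 bits must be zero. */
            long long pv_offset = (long long)phys_start - (long long)page_offset;

            printf("__pv_phys_pfn_offset = 0x%lx\n", pfn_offset);
            printf("16 MiB aligned: %s\n",
                   (pv_offset & 0xffffffLL) ? "no" : "yes");
            return 0;
    }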
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);
+unsigned int elf_hwcap2 __read_mostly;
+EXPORT_SYMBOL(elf_hwcap2);
+
#ifdef MULTI_CPU
struct processor processor __read_mostly;
cacheid = CACHEID_VIVT;
}
- printk("CPU: %s data cache, %s instruction cache\n",
+ pr_info("CPU: %s data cache, %s instruction cache\n",
cache_is_vivt() ? "VIVT" :
cache_is_vipt_aliasing() ? "VIPT aliasing" :
cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
struct stack *stk = &stacks[cpu];
if (cpu >= NR_CPUS) {
- printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+ pr_crit("CPU%u: bad primary CPU number\n", cpu);
BUG();
}
*/
set_my_cpu_offset(0);
- printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
+ pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
struct mpidr_hash mpidr_hash;
*/
list = lookup_processor_type(read_cpuid_id());
if (!list) {
- printk("CPU configuration botched (ID %08x), unable "
- "to continue.\n", read_cpuid_id());
+ pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+ read_cpuid_id());
while (1);
}
cpu_cache = *list->cache;
#endif
- printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
- proc_arch[cpu_architecture()], cr_alignment);
+ pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+ cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ proc_arch[cpu_architecture()], cr_alignment);
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
list->arch_name, ENDIANNESS);
u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
- printk(KERN_CRIT "NR_BANKS too low, "
- "ignoring memory at 0x%08llx\n", (long long)start);
+ pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
+ (long long)start);
return -EINVAL;
}
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
- printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
- "32-bit physical address space\n", (long long)start);
+ pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
+ (long long)start);
return -EINVAL;
}
if (aligned_start + size > ULONG_MAX) {
- printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
- "32-bit physical address space\n", (long long)start);
+ pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
+ (long long)start);
/*
* To ensure bank->start + bank->size is representable in
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
}
#endif
+ if (aligned_start < PHYS_OFFSET) {
+ if (aligned_start + size <= PHYS_OFFSET) {
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, aligned_start + size);
+ return -EINVAL;
+ }
+
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, (u64)PHYS_OFFSET);
+
+ size -= PHYS_OFFSET - aligned_start;
+ aligned_start = PHYS_OFFSET;
+ }
+
bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
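A worked example of the clamping added above, with made-up numbers (assume PHYS_OFFSET is 0x40000000): a bank passed in as start 0x20000000, size 0x40000000 reaches past PHYS_OFFSET, so only its unreachable low 512 MiB are dropped, leaving start 0x40000000, size 0x20000000; a bank that ends at or below PHYS_OFFSET is rejected with -EINVAL. In sketch form:

    /* Illustrative only; mirrors the clamping logic added above. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long phys_offset = 0x40000000ULL; /* assumed PHYS_OFFSET */
            unsigned long long start = 0x20000000ULL;       /* hypothetical bank   */
            unsigned long long size  = 0x40000000ULL;

            if (start + size <= phys_offset) {
                    printf("bank ignored entirely\n");
                    return 0;
            }
            if (start < phys_offset) {
                    size -= phys_offset - start; /* trim memory the kernel cannot map */
                    start = phys_offset;
            }
            printf("bank becomes 0x%llx + 0x%llx\n", start, size);
            return 0;
    }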
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
- res = memblock_virt_alloc_low(sizeof(*res), 0);
- res = alloc_bootmem_low(sizeof(*res));
+ res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
if (ret)
return;
- ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
+ ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
- printk(KERN_WARNING "crashkernel reservation failed - "
- "memory is in use (0x%lx)\n", (unsigned long)crash_base);
+ pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
+ (unsigned long)crash_base);
return;
}
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(total_mem >> 20));
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(total_mem >> 20));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
machine_desc = mdesc;
machine_name = mdesc->name;
- setup_dma_zone(mdesc);
-
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+ setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
NULL
};
+static const char *hwcap2_str[] = {
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
+ NULL
+};
+
static int c_show(struct seq_file *m, void *v)
{
int i, j;
if (elf_hwcap & (1 << j))
seq_printf(m, "%s ", hwcap_str[j]);
+ for (j = 0; hwcap2_str[j]; j++)
+ if (elf_hwcap2 & (1 << j))
+ seq_printf(m, "%s ", hwcap2_str[j]);
+
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
seq_printf(m, "CPU architecture: %s\n",
proc_arch[cpu_architecture()]);
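The new elf_hwcap2 word shows up in the /proc/cpuinfo "Features" line via hwcap2_str[] above, and is also passed to userspace through the ELF auxiliary vector. A minimal userspace sketch, assuming the crc32 capability sits at bit 4 as in the table above (this check is an illustration, not part of the kernel patch):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_HWCAP2
    #define AT_HWCAP2 26
    #endif

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            /* Bit 4 corresponds to "crc32" in hwcap2_str[] above. */
            printf("crc32 instructions %savailable\n",
                   (hwcap2 & (1UL << 4)) ? "" : "not ");
            return 0;
    }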
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+ ++++#include "hardware.h" /* Gives GPIO_MAX */
+ ++++
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
#define COLLIE_TC35143_GPIO_VERSION0 UCB_IO_0
#define COLLIE_TC35143_GPIO_TBL_CHK UCB_IO_1
#define COLLIE_TC35143_GPIO_VPEN_ON UCB_IO_2
-#define COLLIE_TC35143_GPIO_IR_ON UCB_IO_3
+#define COLLIE_GPIO_IR_ON (COLLIE_TC35143_GPIO_BASE + 3)
#define COLLIE_TC35143_GPIO_AMP_ON UCB_IO_4
#define COLLIE_TC35143_GPIO_VERSION1 UCB_IO_5
#define COLLIE_TC35143_GPIO_FS8KLPF UCB_IO_5
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
+#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
+ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
+ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER,
},
- [MT_MEMORY] = {
+ [MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_RW] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
- [MT_MEMORY_NONCACHED] = {
+ [MT_MEMORY_RWX_NONCACHED] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_MT_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
- [MT_MEMORY_DTCM] = {
+ [MT_MEMORY_RW_DTCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
- [MT_MEMORY_ITCM] = {
+ [MT_MEMORY_RWX_ITCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
- [MT_MEMORY_SO] = {
+ [MT_MEMORY_RW_SO] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_MT_UNCACHED | L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_DMA_READY] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
}
EXPORT_SYMBOL(get_mem_type);
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+ void *data) \
+{ \
+ pte_t pte = pteop(*ptep); \
+\
+ set_pte_ext(ptep, pte, 0); \
+ return 0; \
+} \
+
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+ unsigned long start = addr; \
+ unsigned long size = PAGE_SIZE*numpages; \
+ unsigned long end = start + size; \
+\
+ if (start < MODULES_VADDR || start >= MODULES_END) \
+ return -EINVAL;\
+\
+ if (end < MODULES_VADDR || end >= MODULES_END) \
+ return -EINVAL; \
+\
+ apply_to_page_range(&init_mm, start, size, callback, NULL); \
+ flush_tlb_kernel_range(start, end); \
+ return 0;\
+}
+
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
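These helpers only accept addresses inside the module area (MODULES_VADDR..MODULES_END) and change protections a page at a time via apply_to_page_range(). A hypothetical caller might look like the sketch below; buf and nr_pages are made up for illustration and are assumed to lie in the module mapping:

    /* Hypothetical example of using the new set_memory_* helpers. */
    static void protect_module_buffer(void *buf, int nr_pages)
    {
            unsigned long addr = (unsigned long)buf;

            set_memory_ro(addr, nr_pages);   /* write-protect the pages      */
            set_memory_nx(addr, nr_pages);   /* and make them non-executable */

            /* ... later, before patching or freeing the buffer ... */
            set_memory_rw(addr, nr_pages);
    }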
/*
* Adjust the PMD section entries according to the CPU in use.
*/
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+ /* Also setup NX memory mapping */
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
}
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/*
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+
+ /*
+ * We don't use domains on ARMv6 (since this causes problems with
+ * v6/v7 kernels), so we must use a separate memory type for user
+ * r/o, kernel r/w to map the vectors page.
+ */
+#ifndef CONFIG_ARM_LPAE
+ if (cpu_arch == CPU_ARCH_ARMv6)
+ vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
+
/*
* ARMv6 and above have extended page tables.
*/
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
}
}
if (cpu_arch >= CPU_ARCH_ARMv6) {
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/* Non-cacheable Normal is XCB = 001 */
- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
PMD_SECT_BUFFERED;
} else {
/* For both ARMv6 and non-TEX-remapping ARMv7 */
- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
PMD_SECT_TEX(1);
}
} else {
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
}
#ifdef CONFIG_ARM_LPAE
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+ mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
static void __init map_lowmem(void)
{
struct memblock_region *reg;
+ unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
if (start >= end)
break;
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY;
+ if (end < kernel_x_start || start >= kernel_x_end) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RWX;
- create_mapping(&map);
+ create_mapping(&map);
+ } else {
+ /* This better cover the entire kernel */
+ if (start < kernel_x_start) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = kernel_x_start - start;
+ map.type = MT_MEMORY_RW;
+
+ create_mapping(&map);
+ }
+
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+ map.type = MT_MEMORY_RWX;
+
+ create_mapping(&map);
+
+ if (kernel_x_end < end) {
+ map.pfn = __phys_to_pfn(kernel_x_end);
+ map.virtual = __phys_to_virt(kernel_x_end);
+ map.length = end - kernel_x_end;
+ map.type = MT_MEMORY_RW;
+
+ create_mapping(&map);
+ }
+ }
}
}
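To see what the reworked map_lowmem() produces, take assumed values: a single RAM bank 0x40000000-0x60000000, TEXT_OFFSET 0x8000, __init_end at physical 0x40800000 and a SECTION_SIZE of 1 MiB. Then

    kernel_x_start = round_down(0x40008000, 1 MiB) = 0x40000000
    kernel_x_end   = round_up(0x40800000, 1 MiB)   = 0x40800000

so 0x40000000-0x40800000 (kernel text up to the end of init) is mapped MT_MEMORY_RWX, while 0x40800000-0x60000000 is mapped MT_MEMORY_RW, i.e. non-executable. A bank that does not overlap the kernel image is still mapped in one go, as MT_MEMORY_RWX, matching the old behaviour.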
mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
b 1f
__v7_ca7mp_setup:
+__v7_ca12mp_setup:
__v7_ca15mp_setup:
mov r10, #0
1:
4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
- dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+ dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
__v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+ /*
+ * ARM Ltd. Cortex A12 processor.
+ */
+ .type __v7_ca12mp_proc_info, #object
+__v7_ca12mp_proc_info:
+ .long 0x410fc0d0
+ .long 0xff0ffff0
+ __v7_proc __v7_ca12mp_setup
+ .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+
/*
* ARM Ltd. Cortex A15 processor.
*/
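The new __v7_ca12mp_proc_info entry is selected the same way as the existing ones: the boot-time lookup masks the CPU's MIDR and compares it against the entry's value, so the variant and revision fields are ignored. A small illustrative check (the sample MIDR below is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int midr = 0x411fc0d2; /* hypothetical Cortex-A12 r1p2 MIDR */
            unsigned int val  = 0x410fc0d0; /* value from __v7_ca12mp_proc_info  */
            unsigned int mask = 0xff0ffff0; /* mask from __v7_ca12mp_proc_info   */

            printf("matches the Cortex-A12 entry: %s\n",
                   ((midr & mask) == val) ? "yes" : "no");
            return 0;
    }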