]>
Commit | Line | Data |
---|---|---|
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 */

#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
12 | ||
#include <variant/core.h>

#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
19 | |
20 | /* Assertions. */ | |
21 | ||
22 | #if (XCHAL_HAVE_WINDOWED != 1) | |
173d6681 | 23 | # error Linux requires the Xtensa Windowed Registers Option. |
9a8fd558 CZ |
24 | #endif |
25 | ||
0773495b MF |
26 | /* Xtensa ABI requires stack alignment to be at least 16 */ |
27 | ||
28 | #define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) | |
29 | ||
30 | #define ARCH_SLAB_MINALIGN STACK_ALIGN | |
a81cbd2d | 31 | |
9a8fd558 CZ |
/*
 * User space process size: 1 GB.
 * The windowed call ABI requires caller and callee to live in the same
 * 1 GB region.  Because the C compiler emits trampoline code on the stack
 * for sources that take the address of a nested C function (glibc relies
 * on this), the 1 GB restriction covers the stack as well.
 */

#ifdef CONFIG_MMU
#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
#else
#define TASK_SIZE	__XTENSA_UL_CONST(0xffffffff)
#endif

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP
9a8fd558 | 48 | |
38fef73c MF |
/*
 * General exception cause assigned to fake NMI.  Fake NMI must be handled
 * differently from other interrupts, yet it reuses the common kernel
 * entry/exit code.
 */

#define EXCCAUSE_MAPPED_NMI	62

/*
 * General exception cause assigned to debug exceptions.  Debug exceptions
 * have their own vector rather than the general exception vectors (user,
 * kernel, double), and report their specific cause via DEBUGCAUSE instead
 * of EXCCAUSE.  It is nevertheless sometimes convenient to redirect debug
 * exceptions through the general exception mechanism, so an otherwise
 * unused EXCCAUSE value is reserved for them here.
 */

#define EXCCAUSE_MAPPED_DEBUG	63

/*
 * DEPC doubles as a flag distinguishing double exceptions from regular
 * ones.  For performance reasons DEPC may hold the EXCCAUSE value of a
 * regular exception, so this definition marks a valid double exception
 * address.
 * (Note: it is tested with bgeui, so it must be 64, 128, or 256.)
 */

#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
77 | ||
/* Double-expansion helpers so that a macro interrupt number/level argument
 * is expanded before token pasting into the XCHAL_* configuration names.
 */
#define XTENSA_INT_LEVEL(intno)		_XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno)	XCHAL_INT##intno##_LEVEL

#define XTENSA_INTLEVEL_MASK(level)	_XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level)	(XCHAL_INTLEVEL##level##_MASK)

#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)

#define PROFILING_INTLEVEL	XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)

/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
#define LOCKLEVEL	(PROFILING_INTLEVEL - 1)
#else
#define LOCKLEVEL	XCHAL_EXCM_LEVEL
#endif

#define TOPLEVEL	XCHAL_EXCM_LEVEL
/* Fake NMI is possible only when some interrupt level sits above LOCKLEVEL. */
#define XTENSA_FAKE_NMI	(LOCKLEVEL < TOPLEVEL)
9a8fd558 CZ |
100 | |
/* WSBITS and WBBITS are the widths of the WINDOWSTART and WINDOWBASE
 * registers: one WINDOWSTART bit per 4-register window, and enough
 * WINDOWBASE bits to index those windows.
 */
#define WSBITS	(XCHAL_NUM_AREGS / 4)		/* width of WINDOWSTART in bits */
#define WBBITS	(XCHAL_NUM_AREGS_LOG2 - 2)	/* width of WINDOWBASE in bits */
106 | ||
#ifndef __ASSEMBLY__

/* Build a valid return address for the specified call winsize.
 * winsize must be 1 (call4), 2 (call8), or 3 (call12); it occupies the
 * top two bits of the return address.
 */
#define MAKE_RA_FOR_CALL(ra, ws)	(((ra) & 0x3fffffff) | (ws) << 30)

/* Convert a return address to a valid pc by restoring the 1 GB region
 * bits from the stack pointer.
 * Note: we assume the stack pointer is in the same 1 GB range as the ra.
 */
#define MAKE_PC_FROM_RA(ra, sp)		(((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
118 | ||
/* Spill slot location for register reg in the spill area under the stack
 * pointer sp (call4 frame).  reg must be in the range [0..4).
 */
#define SPILL_SLOT(sp, reg)		(*(((unsigned long *)(sp)) - 4 + (reg)))

/* Spill slot location for register reg in the spill area under the stack
 * pointer sp for a call8 frame.  reg must be in the range [4..8).
 */
#define SPILL_SLOT_CALL8(sp, reg)	(*(((unsigned long *)(sp)) - 12 + (reg)))

/* Spill slot location for register reg in the spill area under the stack
 * pointer sp for a call12 frame.  reg must be in the range [4..12).
 */
#define SPILL_SLOT_CALL12(sp, reg)	(*(((unsigned long *)(sp)) - 16 + (reg)))
133 | ||
/* Legacy address-limit descriptor; see uaccess.h for example uses. */
typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {

	/* kernel's return address and stack pointer for context switching */
	unsigned long ra;	/* kernel's a0: return address and window call size */
	unsigned long sp;	/* kernel's a1: stack pointer */

	mm_segment_t current_ds;	/* see uaccess.h for example uses */

	/* struct xtensa_cpuinfo info; */

	unsigned long bad_vaddr;	/* last user fault */
	unsigned long bad_uaddr;	/* last kernel fault accessing user space */
	unsigned long error_code;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* ptrace-installed hardware break/watchpoints, one slot per unit */
	struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
	struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
#endif
	/* Make structure 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));
};
158 | ||
9a8fd558 CZ |
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)

/* Initial register state for a kernel thread: empty kernel frame at the
 * top of init_stack.  Uses C99 designated initializers; the obsolete GCC
 * "field:" form used previously is rejected by modern compilers.
 */
#define INIT_THREAD \
{ \
	.ra = 0, \
	.sp = sizeof(init_stack) + (long) &init_stack, \
	.current_ds = {0}, \
	/*info: {0}, */ \
	.bad_vaddr = 0, \
	.bad_uaddr = 0, \
	.error_code = 0, \
}
174 | ||
175 | ||
/*
 * Do necessary setup to start up a newly executed thread.
 * Note: We set-up ps as if we did a call4 to the new pc.
 * set_thread_state in signal.c depends on it.
 */
#define USER_PS_VALUE ((1 << PS_WOE_BIT) |				\
		       (1 << PS_CALLINC_SHIFT) |			\
		       (USER_RING << PS_RING_SHIFT) |			\
		       (1 << PS_UM_BIT) |				\
		       (1 << PS_EXCM_BIT))

/* Clearing a0 terminates the backtrace.
 * Wrapped in do { } while (0) so the multi-statement expansion behaves as
 * a single statement (e.g. inside an unbraced if); arguments are
 * parenthesized against operator-precedence surprises.
 */
#define start_thread(regs, new_pc, new_sp)				\
	do {								\
		memset((regs), 0, sizeof(*(regs)));			\
		(regs)->pc = (new_pc);					\
		(regs)->ps = USER_PS_VALUE;				\
		(regs)->areg[1] = (new_sp);				\
		(regs)->areg[0] = 0;					\
		(regs)->wmask = 1;					\
		(regs)->depc = 0;					\
		(regs)->windowbase = 0;					\
		(regs)->windowstart = 1;				\
	} while (0)
198 | ||
/* Forward declarations */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread; nothing to do on xtensa. */
#define release_thread(thread)	do { } while (0)

/* Find the wait-channel (sleeping pc) of a blocked task. */
extern unsigned long get_wchan(struct task_struct *p);

/* User pc and stack pointer of a task, read from its saved pt_regs. */
#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->areg[1])

/* No architectural pause hint; a compiler barrier is all we need. */
#define cpu_relax()	barrier()
9a8fd558 CZ |
212 | |
/* Special register access. */

#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));

/* (x) must be parenthesized: the cast would otherwise bind only to the
 * first token of a compound argument such as set_sr(a + b, sr).
 */
#define set_sr(x,sr) ({unsigned int v = (unsigned int)(x); WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
220 | ||
/* Default the external-register feature flag to "absent" for cores whose
 * variant headers do not define it.
 */
#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
#endif

#if XCHAL_HAVE_EXTERN_REGS

/* Write value to the external register at addr (WER instruction). */
static inline void set_er(unsigned long value, unsigned long addr)
{
	asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
}

/* Read and return the external register at addr (RER instruction). */
static inline unsigned long get_er(unsigned long addr)
{
	register unsigned long value;
	asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
	return value;
}

#endif /* XCHAL_HAVE_EXTERN_REGS */
240 | ||
#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_PROCESSOR_H */