/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu-info.h>
#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/vdso/processor.h>
/*
 * System setup and hardware flags..
 */
extern unsigned int vced_count, vcei_count;
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
#define TASK_SIZE	0x3fff8000UL
#else
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x80000000UL
#endif

#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#ifdef CONFIG_MIPS_VA_BITS_48
#define TASK_SIZE64	(0x1UL << ((cpu_data[0].vmbits > 48) ? 48 : cpu_data[0].vmbits))
#else
#define TASK_SIZE64	0x10000000000UL
#endif
#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif /* CONFIG_64BIT */
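/*
 * Illustrative note: on a 64-bit kernel TASK_SIZE is evaluated per thread,
 * so a task running a 32-bit binary (TIF_32BIT_ADDR set) is limited to
 * TASK_SIZE32 while a native 64-bit task gets TASK_SIZE64; mmap() range
 * checks follow the flag of the calling thread.
 */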
#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
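/*
 * Descriptive note (an assumption drawn from the macro above): this bounds
 * the random offset applied when the VDSO is placed above the stack, giving
 * a 1 MiB window to 32-bit address spaces and 64 MiB otherwise.
 */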
extern unsigned long mips_stack_top(void);
#define STACK_TOP	mips_stack_top()
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
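/*
 * Worked example (assuming 4 KiB pages and the 32-bit 2GB TASK_SIZE):
 * TASK_UNMAPPED_BASE = PAGE_ALIGN(0x80000000 / 3) = 0x2aaab000, i.e.
 * anonymous mmap()s start roughly one third of the way into the user
 * address space, leaving the area below for the heap.
 */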
#define NUM_FPU_REGS	32
#ifdef CONFIG_CPU_HAS_MSA
# define FPU_REG_WIDTH	128
#else
# define FPU_REG_WIDTH	64
#endif

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx)	(idx)
#else
# define FPR_IDX(width, idx)	((idx) ^ ((64 / (width)) - 1))
#endif
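/*
 * Example (illustrative): with FPU_REG_WIDTH == 64 on a big-endian kernel,
 * FPR_IDX(32, 0) == 0 ^ ((64 / 32) - 1) == 1, i.e. the low 32-bit half of
 * a 64-bit FP register is fetched from val32[1], matching the in-memory
 * layout of the 64-bit value.
 */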
#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
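/*
 * BUILD_FPR_ACCESS(32) and BUILD_FPR_ACCESS(64) above generate
 * get_fpr32()/set_fpr32() and get_fpr64()/set_fpr64(), which read or write
 * one 32- or 64-bit slice of a (possibly 128-bit wide MSA) register through
 * the endian-correcting FPR_IDX() mapping.
 */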
/*
 * It would be nice to add some more fields for emulator statistics,
 * the additional information is private to the FPU emulator for now.
 * See arch/mips/include/asm/fpu_emulator.h.
 */
struct mips_fpu_struct {
	union fpureg	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
	unsigned int	msacsr;
};
#define NUM_DSP_REGS	6

typedef unsigned long dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};
#define INIT_CPUMASK { \
	{0,} \
}
struct mips3264_watch_reg_state {
	/* The width of watchlo is 32 in a 32 bit kernel and 64 in a
	   64 bit kernel.  We use unsigned long as it has the same
	   property. */
	unsigned long watchlo[NUM_WATCH_REGS];
	/* Only the mask and IRW bits from watchhi. */
	u16 watchhi[NUM_WATCH_REGS];
};
union mips_watch_reg_state {
	struct mips3264_watch_reg_state mips3264;
};
#if defined(CONFIG_CPU_CAVIUM_OCTEON)

struct octeon_cop2_state {
	/* DMFC2 rt, 0x0201 */
	unsigned long	cop2_crc_iv;
	/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
	unsigned long	cop2_crc_length;
	/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
	unsigned long	cop2_crc_poly;
	/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
	unsigned long	cop2_llm_dat[2];
	/* DMFC2 rt, 0x0084 */
	unsigned long	cop2_3des_iv;
	/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
	unsigned long	cop2_3des_key[3];
	/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
	unsigned long	cop2_3des_result;
	/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
	unsigned long	cop2_aes_inp0;
	/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
	unsigned long	cop2_aes_iv[2];
	/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
	 * rt, 0x0107 */
	unsigned long	cop2_aes_key[4];
	/* DMFC2 rt, 0x0110 */
	unsigned long	cop2_aes_keylen;
	/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
	unsigned long	cop2_aes_result[2];
	/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
	 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
	 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
	 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
	 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
	unsigned long	cop2_hsh_datw[15];
	/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
	 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
	 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
	unsigned long	cop2_hsh_ivw[8];
	/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
	unsigned long	cop2_gfm_mult[2];
	/* DMFC2 rt, 0x025E - Pass2 */
	unsigned long	cop2_gfm_poly;
	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
	unsigned long	cop2_gfm_result[2];
	/* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
	unsigned long	cop2_sha3[2];
};
#define COP2_INIT						\
	.cp2			= {0,},
struct octeon_cvmseg_state {
	unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
			    [cpu_dcache_line_size() / sizeof(unsigned long)];
};
#elif defined(CONFIG_CPU_XLP)
struct nlm_cop2_state {
	u64	rx[4];
	u64	tx[4];
	u32	tx_msg_status;
	u32	rx_msg_status;
};

#define COP2_INIT						\
	.cp2			= {{0}, {0}, 0, 0},
#else
#define COP2_INIT
#endif
#ifdef CONFIG_CPU_HAS_MSA
# define ARCH_MIN_TASKALIGN	16
# define FPU_ALIGN	__aligned(16)
#else
# define ARCH_MIN_TASKALIGN	8
# define FPU_ALIGN
#endif

struct mips_abi;
/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

#ifdef CONFIG_MIPS_FP_SUPPORT
	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu FPU_ALIGN;
	/* Assigned branch delay slot 'emulation' frame */
	atomic_t bd_emu_frame;
	/* PC of the branch from a branch delay slot 'emulation' */
	unsigned long bd_emu_branch_pc;
	/* PC to continue from following a branch delay slot 'emulation' */
	unsigned long bd_emu_cont_pc;
#endif

#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Saved watch register state, if available. */
	union mips_watch_reg_state watch;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_nr;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
#endif
#ifdef CONFIG_CPU_XLP
	struct nlm_cop2_state cp2;
#endif
	struct mips_abi *abi;
};
#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT						\
	.emulated_fp		= 0,				\
	.user_cpus_allowed	= INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */
#ifdef CONFIG_MIPS_FP_SUPPORT
# define FPU_INIT						\
	.fpu			= {				\
		.fpr		= {{{0,},},},			\
		.fcr31		= 0,				\
		.msacsr		= 0,				\
	},							\
	/* Delay slot emulation */				\
	.bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE),		\
	.bd_emu_branch_pc = 0,					\
	.bd_emu_cont_pc = 0,
#else
# define FPU_INIT
#endif
#define INIT_THREAD  {						\
	/*							\
	 * Saved main processor registers			\
	 */							\
	.reg16			= 0,				\
	.reg17			= 0,				\
	.reg18			= 0,				\
	.reg19			= 0,				\
	.reg20			= 0,				\
	.reg21			= 0,				\
	.reg22			= 0,				\
	.reg23			= 0,				\
	.reg29			= 0,				\
	.reg30			= 0,				\
	.reg31			= 0,				\
	/*							\
	 * Saved cp0 stuff					\
	 */							\
	.cp0_status		= 0,				\
	/*							\
	 * Saved FPU/FPU emulator stuff				\
	 */							\
	FPU_INIT						\
	/*							\
	 * FPU affinity state (null if not FPAFF)		\
	 */							\
	FPAFF_INIT						\
	/*							\
	 * Saved DSP stuff					\
	 */							\
	.dsp			= {				\
		.dspr		= {0, },			\
		.dspcontrol	= 0,				\
	},							\
	/*							\
	 * saved watch register stuff				\
	 */							\
	.watch = {{{0,},},},					\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.cp0_badvaddr		= 0,				\
	.cp0_baduaddr		= 0,				\
	.error_code		= 0,				\
	/*							\
	 * Platform specific cop2 registers(null if no COP2)	\
	 */							\
	COP2_INIT						\
}

struct task_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

static inline void flush_thread(void)
{
}
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
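/*
 * Descriptive note: task_pt_regs(tsk) points at the struct pt_regs saved at
 * the top of the task's kernel stack (32 bytes below THREAD_SIZE), and the
 * KSTK_*() macros read the user-mode PC, stack pointer and status register
 * from that frame (used e.g. by /proc/<pid>/stat).
 */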
/*
 * return_address() is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs. It's simply not possible to unwind
 * some CPUs' stack frames.
 *
 * __builtin_return_address works only for non-leaf functions. We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
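/*
 * Usage sketch (illustrative only):
 *
 *	pr_debug("%s called from %pS\n", __func__, return_address());
 */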
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif /* CONFIG_CPU_HAS_PREFETCH */
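/*
 * The prefetch()/prefetchw() macros above map directly to GCC's
 * __builtin_prefetch(): the second argument selects read (0) or write (1)
 * intent and the third the expected temporal locality (1 == low).
 */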
/*
 * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
 * to the prctl syscall.
 */
extern int mips_get_process_fp_mode(struct task_struct *task);
extern int mips_set_process_fp_mode(struct task_struct *task,
				    unsigned int value);
#define GET_FP_MODE(task)		mips_get_process_fp_mode(task)
#define SET_FP_MODE(task,value)		mips_set_process_fp_mode(task, value)
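/*
 * Usage sketch from user space (illustrative, error handling omitted;
 * PR_* constants come from <linux/prctl.h>):
 *
 *	prctl(PR_SET_FP_MODE, PR_FP_MODE_FR);		request FR=1 mode
 *	mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);	read current mode
 */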
#endif /* _ASM_PROCESSOR_H */