/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2004 Mips Technologies, Inc
 * Copyright (C) 2008 Kevin D. Kissell
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>
#include <asm/setup.h>

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];

#define LOCK_MT_PRA() \
        local_irq_save(flags); \
        mtflags = dmt()

#define UNLOCK_MT_PRA() \
        emt(mtflags); \
        local_irq_restore(flags)

#define LOCK_CORE_PRA() \
        local_irq_save(flags); \
        mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
        evpe(mtflags); \
        local_irq_restore(flags)
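
/*
 * Editorial usage sketch (not in the original source): the four macros
 * above expand in place and rely on the *caller* declaring the variables
 * they touch, typically
 *
 *      unsigned long flags;
 *      int mtflags;
 *
 *      LOCK_MT_PRA();
 *      ... manipulate another TC/VPE via the MFTR/MTTR accessors ...
 *      UNLOCK_MT_PRA();
 *
 * as can be seen in smtc_boot_secondary() and ipi_interrupt() below.
 */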

/*
 * Data structures purely associated with SMTC parallelism
 */


/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;

/*
 * Number of FPU contexts for each VPE
 */

static int smtc_nconf1[MAX_SMTC_VPES];


/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status;

/* Boot command line configuration overrides */

static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{
        get_option(&str, &vpe0limit);

        return 1;
}

static int __init ipibufs(char *str)
{
        get_option(&str, &ipibuffers);
        return 1;
}

static int __init stlb_disable(char *s)
{
        nostlb = 1;
        return 1;
}

static int __init asidmask_set(char *str)
{
        get_option(&str, &asidmask);
        switch (asidmask) {
        case 0x1:
        case 0x3:
        case 0x7:
        case 0xf:
        case 0x1f:
        case 0x3f:
        case 0x7f:
        case 0xff:
                smtc_asid_mask = (unsigned long)asidmask;
                break;
        default:
                printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
        }
        return 1;
}

__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
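
/*
 * Editorial note (not in the original source): an illustrative kernel
 * command line exercising the overrides registered above might be
 *
 *      vpe0tcs=1 ipibufs=32 nostlb asidmask=0x3f
 *
 * where asidmask= accepts only the contiguous low-bit masks listed in
 * asidmask_set() above.
 */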

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig;

static int __init hangtrig_enable(char *s)
{
        hang_trig = 1;
        return 1;
}


__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
        get_option(&str, &timerq_limit);
        return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[MAX_SMTC_VPES][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[MAX_SMTC_VPES][8] = {
        {0, 0, 1, 0, 0, 0, 0, 1},
        {0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
        int i, tlbsiz, vpes;
        unsigned long mvpconf0;
        unsigned long config1val;

        /* Set up ASID preservation table */
        for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
                for (i = 0; i < MAX_SMTC_ASIDS; i++) {
                        smtc_live_asid[vpes][i] = 0;
                }
        }
        mvpconf0 = read_c0_mvpconf0();

        if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
                        >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
                /* If we have multiple VPEs, try to share the TLB */
                if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
                        /*
                         * If TLB sizing is programmable, shared TLB
                         * size is the total available complement.
                         * Otherwise, we have to take the sum of all
                         * static VPE TLB entries.
                         */
                        if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
                                        >> MVPCONF0_PTLBE_SHIFT)) == 0) {
                                /*
                                 * If there's more than one VPE, there had better
                                 * be more than one TC, because we need one to bind
                                 * to each VPE in turn to be able to read
                                 * its configuration state!
                                 */
                                settc(1);
                                /* Stop the TC from doing anything foolish */
                                write_tc_c0_tchalt(TCHALT_H);
                                mips_ihb();
                                /* No need to un-Halt - that happens later anyway */
                                for (i = 0; i < vpes; i++) {
                                        write_tc_c0_tcbind(i);
                                        /*
                                         * To be 100% sure we're really getting the right
                                         * information, we exit the configuration state
                                         * and do an IHB after each rebinding.
                                         */
                                        write_c0_mvpcontrol(
                                                read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
                                        mips_ihb();
                                        /*
                                         * Only count if the MMU Type indicated is TLB
                                         */
                                        if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
                                                config1val = read_vpe_c0_config1();
                                                tlbsiz += ((config1val >> 25) & 0x3f) + 1;
                                        }

                                        /* Put core back in configuration state */
                                        write_c0_mvpcontrol(
                                                read_c0_mvpcontrol() | MVPCONTROL_VPC );
                                        mips_ihb();
                                }
                        }
                        write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
                        ehb();

                        /*
                         * Set up kernel data structures to use the software total,
                         * rather than read the per-VPE Config1 value. The values
                         * for "CPU 0" get copied to all the other CPUs as part
                         * of their initialization in smtc_cpu_setup().
                         */

                        /* MIPS32 limits TLB indices to 64 */
                        if (tlbsiz > 64)
                                tlbsiz = 64;
                        cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
                        smtc_status |= SMTC_TLB_SHARED;
                        local_flush_tlb_all();

                        printk("TLB of %d entry pairs shared by %d VPEs\n",
                                tlbsiz, vpes);
                } else {
                        printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
                }
        }
}

/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * cpu_possible_mask and the logical/physical mappings.
 */

int __init smtc_build_cpu_map(int start_cpu_slot)
{
        int i, ntcs;

        /*
         * The CPU map isn't actually used for anything at this point,
         * so it's not clear what else we should do apart from set
         * everything up so that "logical" = "physical".
         */
        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
                set_cpu_possible(i, true);
                __cpu_number_map[i] = i;
                __cpu_logical_map[i] = i;
        }
#ifdef CONFIG_MIPS_MT_FPAFF
        /* Initialize map of CPUs with FPUs */
        cpus_clear(mt_fpu_cpumask);
#endif

        /* One of those TC's is the one booting, and not a secondary... */
        printk("%i available secondary CPU TC(s)\n", i - 1);

        return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
        static int cp1contexts[MAX_SMTC_VPES];

        /*
         * Make a local copy of the available FPU contexts in order
         * to keep track of TCs that can have one.
         */
        if (tc == 1) {
                /*
                 * FIXME: Multi-core SMTC hasn't been tested and the
                 * maximum number of VPEs may change.
                 */
                cp1contexts[0] = smtc_nconf1[0] - 1;
                cp1contexts[1] = smtc_nconf1[1];
        }

        settc(tc);
        write_tc_c0_tchalt(TCHALT_H);
        mips_ihb();
        write_tc_c0_tcstatus((read_tc_c0_tcstatus()
                        & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
                        | TCSTATUS_A);
        /*
         * TCContext gets an offset from the base of the IPIQ array
         * to be used in low-level code to detect the presence of
         * an active IPI queue.
         */
        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);

        /* Bind TC to VPE. */
        write_tc_c0_tcbind(vpe);

        /* In general, all TCs should have the same cpu_data indications. */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));

        /* Check to see if there is a FPU context available for this TC. */
        if (!cp1contexts[vpe])
                cpu_data[cpu].options &= ~MIPS_CPU_FPU;
        else
                cp1contexts[vpe]--;

        /* Store the TC and VPE into the cpu_data structure. */
        cpu_data[cpu].vpe_id = vpe;
        cpu_data[cpu].tc_id = tc;

        /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
        cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
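
/*
 * Editorial note (not in the original source): the TCContext value written
 * in smtc_tc_setup() above is (sizeof(struct smtc_ipi_q) * cpu) << 16, i.e.
 * the byte offset of this CPU's IPIQ[] entry packed into the upper half of
 * the register; the low-level interrupt/IPI assembly presumably uses it to
 * locate IPIQ[cpu] and detect a non-empty IPI queue without a table lookup.
 */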

/*
 * Tweak to get Count registers synced as closely as possible. The
 * value seems good for 34K-class cores.
 */

#define CP0_SKEW 8

void smtc_prepare_cpus(int cpus)
{
        int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
        unsigned long flags;
        unsigned long val;
        int nipi;
        struct smtc_ipi *pipi;

        /* disable interrupts so we can disable MT */
        local_irq_save(flags);
        /* disable MT so we can configure */
        dvpe();
        dmt();

        spin_lock_init(&freeIPIq.lock);

        /*
         * We probably don't have as many VPEs as we do SMP "CPUs",
         * but it's possible - and in any case we'll never use more!
         */
        for (i = 0; i < NR_CPUS; i++) {
                IPIQ[i].head = IPIQ[i].tail = NULL;
                spin_lock_init(&IPIQ[i].lock);
                IPIQ[i].depth = 0;
                IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
        }

        /* cpu_data index starts at zero */
        cpu = 0;
        cpu_data[cpu].vpe_id = 0;
        cpu_data[cpu].tc_id = 0;
        cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
        cpu++;

        /* Report on boot-time options */
        mips_mt_set_cpuoptions();
        if (vpelimit > 0)
                printk("Limit of %d VPEs set\n", vpelimit);
        if (tclimit > 0)
                printk("Limit of %d TCs set\n", tclimit);
        if (nostlb) {
                printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
        }
        if (asidmask)
                printk("ASID mask value override to 0x%x\n", asidmask);

        /* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
        if (hang_trig)
                printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

        /* Put MVPE's into 'configuration state' */
        write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

        val = read_c0_mvpconf0();
        nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
        if (vpelimit > 0 && nvpe > vpelimit)
                nvpe = vpelimit;
        ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        if (ntc > NR_CPUS)
                ntc = NR_CPUS;
        if (tclimit > 0 && ntc > tclimit)
                ntc = tclimit;
        slop = ntc % nvpe;
        for (i = 0; i < nvpe; i++) {
                tcpervpe[i] = ntc / nvpe;
                if (slop) {
                        if ((slop - i) > 0)
                                tcpervpe[i]++;
                }
        }
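        /*
         * Worked example (editorial note, not in the original source):
         * with ntc = 5 TCs and nvpe = 2 VPEs, the loop above gives each
         * VPE ntc/nvpe = 2 TCs and hands the single "slop" TC to VPE 0,
         * i.e. tcpervpe[] = { 3, 2 }.
         */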
        /* Handle command line override for VPE0 */
        if (vpe0limit > ntc)
                vpe0limit = ntc;
        if (vpe0limit > 0) {
                int slopslop;

                if (vpe0limit < tcpervpe[0]) {
                        /* Reducing TC count - distribute to others */
                        slop = tcpervpe[0] - vpe0limit;
                        slopslop = slop % (nvpe - 1);
                        tcpervpe[0] = vpe0limit;
                        for (i = 1; i < nvpe; i++) {
                                tcpervpe[i] += slop / (nvpe - 1);
                                if (slopslop && ((slopslop - (i - 1) > 0)))
                                        tcpervpe[i]++;
                        }
                } else if (vpe0limit > tcpervpe[0]) {
                        /* Increasing TC count - steal from others */
                        slop = vpe0limit - tcpervpe[0];
                        slopslop = slop % (nvpe - 1);
                        tcpervpe[0] = vpe0limit;
                        for (i = 1; i < nvpe; i++) {
                                tcpervpe[i] -= slop / (nvpe - 1);
                                if (slopslop && ((slopslop - (i - 1) > 0)))
                                        tcpervpe[i]--;
                        }
                }
        }

        /* Set up shared TLB */
        smtc_configure_tlb();

        for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
                /* Get number of CP1 contexts for each VPE. */
                if (tc == 0) {
                        /*
                         * Do not call settc() for TC0 or the FPU context
                         * value will be incorrect. Besides, we know that
                         * we are TC0 anyway.
                         */
                        smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
                                VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
                        if (nvpe == 2) {
                                settc(1);
                                smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
                                        VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
                                settc(0);
                        }
                }
                if (tcpervpe[vpe] == 0)
                        continue;
                if (vpe != 0)
                        printk(", ");
                printk("VPE %d: TC", vpe);
                for (i = 0; i < tcpervpe[vpe]; i++) {
                        /*
                         * TC 0 is bound to VPE 0 at reset,
                         * and is presumably executing this
                         * code. Leave it alone!
                         */
                        if (tc != 0) {
                                smtc_tc_setup(vpe, tc, cpu);
                                if (vpe != 0) {
                                        /*
                                         * Set MVP bit (possibly again). Do it
                                         * here to catch CPUs that have no TCs
                                         * bound to the VPE at reset. In that
                                         * case, a TC must be bound to the VPE
                                         * before we can set VPEControl[MVP]
                                         */
                                        write_vpe_c0_vpeconf0(
                                                read_vpe_c0_vpeconf0() |
                                                VPECONF0_MVP);
                                }
                                cpu++;
                        }
                        printk(" %d", tc);
                        tc++;
                }
                if (vpe != 0) {
                        /*
                         * Allow this VPE to control others.
                         */
                        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
                                              VPECONF0_MVP);

                        /*
                         * Clear any stale software interrupts from VPE's Cause
                         */
                        write_vpe_c0_cause(0);

                        /*
                         * Clear ERL/EXL of VPEs other than 0
                         * and set restricted interrupt enable/mask.
                         */
                        write_vpe_c0_status((read_vpe_c0_status()
                                & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
                                | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
                                | ST0_IE));
                        /*
                         * set config to be the same as vpe0,
                         * particularly kseg0 coherency alg
                         */
                        write_vpe_c0_config(read_c0_config());
                        /* Clear any pending timer interrupt */
                        write_vpe_c0_compare(0);
                        /* Propagate Config7 */
                        write_vpe_c0_config7(read_c0_config7());
                        write_vpe_c0_count(read_c0_count() + CP0_SKEW);
                        ehb();
                }
                /* enable multi-threading within VPE */
                write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
                /* enable the VPE */
                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
        }

        /*
         * Pull any physically present but unused TCs out of circulation.
         */
        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
                set_cpu_possible(tc, false);
                set_cpu_present(tc, false);
                tc++;
        }

        /* release config state */
        write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

        printk("\n");

        /* Set up coprocessor affinity CPU mask(s) */

#ifdef CONFIG_MIPS_MT_FPAFF
        for (tc = 0; tc < ntc; tc++) {
                if (cpu_data[tc].options & MIPS_CPU_FPU)
                        cpu_set(tc, mt_fpu_cpumask);
        }
#endif

        /* set up ipi interrupts... */

        /* If we have multiple VPEs running, set up the cross-VPE interrupt */

        setup_cross_vpe_interrupts(nvpe);

        /* Set up queue of free IPI "messages". */
        nipi = NR_CPUS * IPIBUF_PER_CPU;
        if (ipibuffers > 0)
                nipi = ipibuffers;

        pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
        if (pipi == NULL)
                panic("kmalloc of IPI message buffers failed");
        else
                printk("IPI buffer pool of %d buffers\n", nipi);
        for (i = 0; i < nipi; i++) {
                smtc_ipi_nq(&freeIPIq, pipi);
                pipi++;
        }

        /* Arm multithreading and enable other VPEs - but all TCs are Halted */
        emt(EMT_ENABLE);
        evpe(EVPE_ENABLE);
        local_irq_restore(flags);
        /* Initialize SMTC /proc statistics/diagnostics */
        init_smtc_stats();
}


/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 *
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
        extern u32 kernelsp[NR_CPUS];
        unsigned long flags;
        int mtflags;

        LOCK_MT_PRA();
        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
                dvpe();
        }
        settc(cpu_data[cpu].tc_id);

        /* pc */
        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

        /* stack pointer */
        kernelsp[cpu] = __KSTK_TOS(idle);
        write_tc_gpr_sp(__KSTK_TOS(idle));

        /* global pointer */
        write_tc_gpr_gp((unsigned long)task_thread_info(idle));

        smtc_status |= SMTC_MTC_ACTIVE;
        write_tc_c0_tchalt(0);
        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
                evpe(EVPE_ENABLE);
        }
        UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
}

void smtc_smp_finish(void)
{
        int cpu = smp_processor_id();

        /*
         * Lowest-numbered CPU per VPE starts a clock tick.
         * Like the per_cpu_trap_init() hack, this assumes that
         * SMTC init code assigns TCs consecutively and
         * in ascending order across available VPEs.
         */
        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);

        local_irq_enable();

        printk("TC %d going on-line as CPU %d\n",
                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
                        unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
        unsigned int vpe = current_cpu_data.vpe_id;

        vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
        irq_hwmask[irq] = hwmask;

        return setup_irq(irq, new);
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
        /*
         * If a "fast path" cache of quickly decodable affinity state
         * is maintained, this is where it gets done, on a call up
         * from the platform affinity code.
         */
}

void smtc_forward_irq(struct irq_data *d)
{
        unsigned int irq = d->irq;
        int target;

        /*
         * OK wise guy, now figure out how to get the IRQ
         * to be serviced on an authorized "CPU".
         *
         * Ideally, to handle the situation where an IRQ has multiple
         * eligible CPUs, we would maintain state per IRQ that would
         * allow a fair distribution of service requests. Since the
         * expected use model is any-or-only-one, for simplicity
         * and efficiency, we just pick the easiest one to find.
         */

        target = cpumask_first(d->affinity);

        /*
         * We depend on the platform code to have correctly processed
         * IRQ affinity change requests to ensure that the IRQ affinity
         * mask has been purged of bits corresponding to nonexistent and
         * offline "CPUs", and to TCs bound to VPEs other than the VPE
         * connected to the physical interrupt input for the interrupt
         * in question. Otherwise we have a nasty problem with interrupt
         * mask management. This is best handled in non-performance-critical
         * platform IRQ affinity setting code, to minimize interrupt-time
         * checks.
         */

        /* If no one is eligible, service locally */
        if (target >= NR_CPUS)
                do_IRQ_no_affinity(irq);
        else
                smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
        int i;
        struct smtc_ipi *temp;

        for (i = 0; i < NR_CPUS; i++) {
                pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
                        i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
                        IPIQ[i].depth);
                temp = IPIQ[i].head;

                while (temp != IPIQ[i].tail) {
                        pr_debug("%d %d %d: ", temp->type, temp->dest,
                                 (int)temp->arg);
#ifdef SMTC_IPI_DEBUG
                        pr_debug("%u %lu\n", temp->sender, temp->stamp);
#else
                        pr_debug("\n");
#endif
                        temp = temp->flink;
                }
        }
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static inline int atomic_postincrement(atomic_t *v)
{
        unsigned long result;

        unsigned long temp;

        __asm__ __volatile__(
        "1:     ll      %0, %2                                  \n"
        "       addu    %1, %0, 1                               \n"
        "       sc      %1, %2                                  \n"
        "       beqz    %1, 1b                                  \n"
        __WEAK_LLSC_MB
        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
        : "m" (v->counter)
        : "memory");

        return result;
}
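
/*
 * Editorial sketch (not in the original source): ignoring atomicity, the
 * LL/SC loop above behaves like
 *
 *      int old = v->counter;           // ll
 *      v->counter = old + 1;           // addu + sc, retried until it sticks
 *      return old;                     // the *previous* value
 *
 * which is the add-and-return-previous-value primitive that the comment
 * above says atomic.h does not provide directly.
 */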

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
        int tcstatus;
        struct smtc_ipi *pipi;
        unsigned long flags;
        int mtflags;
        unsigned long tcrestart;
        int set_resched_flag = (type == LINUX_SMP_IPI &&
                                        action == SMP_RESCHEDULE_YOURSELF);

        if (cpu == smp_processor_id()) {
                printk("Cannot Send IPI to self!\n");
                return;
        }
        if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
                return; /* There is a reschedule queued already */

        /* Set up a descriptor, to be delivered either promptly or queued */
        pipi = smtc_ipi_dq(&freeIPIq);
        if (pipi == NULL) {
                bust_spinlocks(1);
                mips_mt_regdump(dvpe());
                panic("IPI Msg. Buffers Depleted");
        }
        pipi->type = type;
        pipi->arg = (void *)action;
        pipi->dest = cpu;
        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
                /* If not on same VPE, enqueue and send cross-VPE interrupt */
                IPIQ[cpu].resched_flag |= set_resched_flag;
                smtc_ipi_nq(&IPIQ[cpu], pipi);
                LOCK_CORE_PRA();
                settc(cpu_data[cpu].tc_id);
                write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
                UNLOCK_CORE_PRA();
        } else {
                /*
                 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
                 * since ASID shootdown on the other VPE may
                 * collide with this operation.
                 */
                LOCK_CORE_PRA();
                settc(cpu_data[cpu].tc_id);
                /* Halt the targeted TC */
                write_tc_c0_tchalt(TCHALT_H);
                mips_ihb();

                /*
                 * Inspect TCStatus - if IXMT is set, we have to queue
                 * a message. Otherwise, we set up the "interrupt"
                 * of the other TC
                 */
                tcstatus = read_tc_c0_tcstatus();

                if ((tcstatus & TCSTATUS_IXMT) != 0) {
                        /*
                         * If we're in the irq-off version of the wait
                         * loop, we need to force exit from the wait and
                         * do a direct post of the IPI.
                         */
                        if (cpu_wait == r4k_wait_irqoff) {
                                tcrestart = read_tc_c0_tcrestart();
                                if (address_is_in_r4k_wait_irqoff(tcrestart)) {
                                        write_tc_c0_tcrestart(__pastwait);
                                        tcstatus &= ~TCSTATUS_IXMT;
                                        write_tc_c0_tcstatus(tcstatus);
                                        goto postdirect;
                                }
                        }
                        /*
                         * Otherwise we queue the message for the target TC
                         * to pick up when he does a local_irq_restore()
                         */
                        write_tc_c0_tchalt(0);
                        UNLOCK_CORE_PRA();
                        IPIQ[cpu].resched_flag |= set_resched_flag;
                        smtc_ipi_nq(&IPIQ[cpu], pipi);
                } else {
postdirect:
                        post_direct_ipi(cpu, pipi);
                        write_tc_c0_tchalt(0);
                        UNLOCK_CORE_PRA();
                }
        }
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
        struct pt_regs *kstack;
        unsigned long tcstatus;
        unsigned long tcrestart;
        extern u32 kernelsp[NR_CPUS];
        extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);

        /* Extract Status, EPC from halted TC */
        tcstatus = read_tc_c0_tcstatus();
        tcrestart = read_tc_c0_tcrestart();
        /* If TCRestart indicates a WAIT instruction, advance the PC */
        if ((tcrestart & 0x80000000)
            && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
                tcrestart += 4;
        }
        /*
         * Save on TC's future kernel stack
         *
         * CU bit of Status is indicator that TC was
         * already running on a kernel stack...
         */
        if (tcstatus & ST0_CU0) {
                /* Note that this "- 1" is pointer arithmetic */
                kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
        } else {
                kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
        }

        kstack->cp0_epc = (long)tcrestart;
        /* Save TCStatus */
        kstack->cp0_tcstatus = tcstatus;
        /* Pass token of operation to be performed in kernel stack pad area */
        kstack->pad0[4] = (unsigned long)pipi;
        /* Pass address of function to be called likewise */
        kstack->pad0[5] = (unsigned long)&ipi_decode;
        /* Set interrupt exempt and kernel mode */
        tcstatus |= TCSTATUS_IXMT;
        tcstatus &= ~TCSTATUS_TKSU;
        write_tc_c0_tcstatus(tcstatus);
        ehb();
        /* Set TC Restart address to be SMTC IPI vector */
        write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
        scheduler_ipi();
}

static void ipi_call_interrupt(void)
{
        /* Invoke generic function invocation code in smp.c */
        smp_call_function_interrupt();
}

DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

static void __irq_entry smtc_clock_tick_interrupt(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        int irq = MIPS_CPU_IRQ_BASE + 1;

        irq_enter();
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        cd = &per_cpu(mips_clockevent_device, cpu);
        cd->event_handler(cd);
        irq_exit();
}

void ipi_decode(struct smtc_ipi *pipi)
{
        void *arg_copy = pipi->arg;
        int type_copy = pipi->type;

        smtc_ipi_nq(&freeIPIq, pipi);

        switch (type_copy) {
        case SMTC_CLOCK_TICK:
                smtc_clock_tick_interrupt();
                break;

        case LINUX_SMP_IPI:
                switch ((int)arg_copy) {
                case SMP_RESCHEDULE_YOURSELF:
                        ipi_resched_interrupt();
                        break;
                case SMP_CALL_FUNCTION:
                        ipi_call_interrupt();
                        break;
                default:
                        printk("Impossible SMTC IPI Argument %p\n", arg_copy);
                        break;
                }
                break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
        case IRQ_AFFINITY_IPI:
                /*
                 * Accept a "forwarded" interrupt that was initially
                 * taken by a TC who doesn't have affinity for the IRQ.
                 */
                do_IRQ_no_affinity((int)arg_copy);
                break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
        default:
                printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
                break;
        }
}

/*
 * Similar to smtc_ipi_replay(), but invoked from context restore,
 * so it reuses the current exception frame rather than set up a
 * new one with self_ipi.
 */

void deferred_smtc_ipi(void)
{
        int cpu = smp_processor_id();

        /*
         * Test is not atomic, but much faster than a dequeue,
         * and the vast majority of invocations will have a null queue.
         * If irqs were disabled when this was called, then any IPIs queued
         * after we test last will be taken on the next irq_enable/restore.
         * If interrupts were enabled, then any IPIs added after the
         * last test will be taken directly.
         */

        while (IPIQ[cpu].head != NULL) {
                struct smtc_ipi_q *q = &IPIQ[cpu];
                struct smtc_ipi *pipi;
                unsigned long flags;

                /*
                 * It may be possible we'll come in with interrupts
                 * already enabled.
                 */
                local_irq_save(flags);
                spin_lock(&q->lock);
                pipi = __smtc_ipi_dq(q);
                spin_unlock(&q->lock);
                if (pipi != NULL) {
                        if (pipi->type == LINUX_SMP_IPI &&
                            (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
                                IPIQ[cpu].resched_flag = 0;
                        ipi_decode(pipi);
                }
                /*
                 * The use of the __raw_local restore isn't
                 * as obviously necessary here as in smtc_ipi_replay(),
                 * but it's more efficient, given that we're already
                 * running down the IPI queue.
                 */
                __arch_local_irq_restore(flags);
        }
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
        int my_vpe = cpu_data[smp_processor_id()].vpe_id;
        int my_tc = cpu_data[smp_processor_id()].tc_id;
        int cpu;
        struct smtc_ipi *pipi;
        unsigned long tcstatus;
        int sent;
        unsigned long flags;
        unsigned int mtflags;
        unsigned int vpflags;

        /*
         * So long as cross-VPE interrupts are done via
         * MFTR/MTTR read-modify-writes of Cause, we need
         * to stop other VPEs whenever the local VPE does
         * anything similar.
         */
        local_irq_save(flags);
        vpflags = dvpe();
        clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
        set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
        irq_enable_hazard();
        evpe(vpflags);
        local_irq_restore(flags);

        /*
         * Cross-VPE Interrupt handler: Try to directly deliver IPIs
         * queued for TCs on this VPE other than the current one.
         * Return-from-interrupt should cause us to drain the queue
         * for the current TC, so we ought not to have to do it explicitly here.
         */

        for_each_online_cpu(cpu) {
                if (cpu_data[cpu].vpe_id != my_vpe)
                        continue;

                pipi = smtc_ipi_dq(&IPIQ[cpu]);
                if (pipi != NULL) {
                        if (cpu_data[cpu].tc_id != my_tc) {
                                sent = 0;
                                LOCK_MT_PRA();
                                settc(cpu_data[cpu].tc_id);
                                write_tc_c0_tchalt(TCHALT_H);
                                mips_ihb();
                                tcstatus = read_tc_c0_tcstatus();
                                if ((tcstatus & TCSTATUS_IXMT) == 0) {
                                        post_direct_ipi(cpu, pipi);
                                        sent = 1;
                                }
                                write_tc_c0_tchalt(0);
                                UNLOCK_MT_PRA();
                                if (!sent) {
                                        smtc_ipi_req(&IPIQ[cpu], pipi);
                                }
                        } else {
                                /*
                                 * ipi_decode() should be called
                                 * with interrupts off
                                 */
                                local_irq_save(flags);
                                if (pipi->type == LINUX_SMP_IPI &&
                                    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
                                        IPIQ[cpu].resched_flag = 0;
                                ipi_decode(pipi);
                                local_irq_restore(flags);
                        }
                }
        }

        return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
        do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
        .handler        = ipi_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
        if (nvpe < 1)
                return;

        if (!cpu_has_vint)
                panic("SMTC Kernel requires Vectored Interrupt support");

        set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

        setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

        irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

/*
 * smtc_ipi_replay is called from raw_local_irq_restore
 */

void smtc_ipi_replay(void)
{
        unsigned int cpu = smp_processor_id();

        /*
         * To the extent that we've ever turned interrupts off,
         * we may have accumulated deferred IPIs. This is subtle.
         * We should be OK: If we pick up something and dispatch
         * it here, that's great. If we see nothing, but concurrent
         * with this operation, another TC sends us an IPI, IXMT
         * is clear, and we'll handle it as a real pseudo-interrupt
         * and not a pseudo-pseudo interrupt. The important thing
         * is to do the last check for queued message *after* the
         * re-enabling of interrupts.
         */
        while (IPIQ[cpu].head != NULL) {
                struct smtc_ipi_q *q = &IPIQ[cpu];
                struct smtc_ipi *pipi;
                unsigned long flags;

                /*
                 * It's just possible we'll come in with interrupts
                 * already enabled.
                 */
                local_irq_save(flags);

                spin_lock(&q->lock);
                pipi = __smtc_ipi_dq(q);
                spin_unlock(&q->lock);
                /*
                 * But use a raw restore here to avoid recursion.
                 */
                __arch_local_irq_restore(flags);

                if (pipi) {
                        self_ipi(pipi);
                        smtc_cpu_stats[cpu].selfipis++;
                }
        }
}

EXPORT_SYMBOL(smtc_ipi_replay);
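
/*
 * Editorial note (not in the original source): as the comments above say,
 * smtc_ipi_replay() runs from the raw_local_irq_restore() path when a TC
 * re-enables "interrupts", draining any IPIs that were queued for it while
 * it was interrupt-exempt and delivering them via self_ipi().
 */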

void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
        int im;
        int flags;
        int mtflags;
        int bit;
        int vpe;
        int tc;
        int hook_ntcs;
        /*
         * printk within DMT-protected regions can deadlock,
         * so buffer diagnostic messages for later output.
         */
        char *pdb_msg;
        char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

        if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
                if (atomic_add_return(1, &idle_hook_initialized) == 1) {
                        int mvpconf0;
                        /* Tedious stuff to just do once */
                        mvpconf0 = read_c0_mvpconf0();
                        hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
                        if (hook_ntcs > NR_CPUS)
                                hook_ntcs = NR_CPUS;
                        for (tc = 0; tc < hook_ntcs; tc++) {
                                tcnoprog[tc] = 0;
                                clock_hang_reported[tc] = 0;
                        }
                        for (vpe = 0; vpe < 2; vpe++)
                                for (im = 0; im < 8; im++)
                                        imstuckcount[vpe][im] = 0;
                        printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
                        atomic_set(&idle_hook_initialized, 1000);
                } else {
                        /* Someone else is initializing in parallel - let 'em finish */
                        while (atomic_read(&idle_hook_initialized) < 1000)
                                ;
                }
        }

        /* Have we stupidly left IXMT set somewhere? */
        if (read_c0_tcstatus() & 0x400) {
                write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
                ehb();
                printk("Dangling IXMT in cpu_idle()\n");
        }

        /* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
        local_irq_save(flags);
        mtflags = dmt();
        pdb_msg = &id_ho_db_msg[0];
        im = read_c0_status();
        vpe = current_cpu_data.vpe_id;
        for (bit = 0; bit < 8; bit++) {
                /*
                 * In current prototype, I/O interrupts
                 * are masked for VPE > 0
                 */
                if (vpemask[vpe][bit]) {
                        if (!(im & (0x100 << bit)))
                                imstuckcount[vpe][bit]++;
                        else
                                imstuckcount[vpe][bit] = 0;
                        if (imstuckcount[vpe][bit] > IM_LIMIT) {
                                set_c0_status(0x100 << bit);
                                ehb();
                                imstuckcount[vpe][bit] = 0;
                                pdb_msg += sprintf(pdb_msg,
                                        "Dangling IM %d fixed for VPE %d\n", bit,
                                        vpe);
                        }
                }
        }

        emt(mtflags);
        local_irq_restore(flags);
        if (pdb_msg != &id_ho_db_msg[0])
                printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

        smtc_ipi_replay();
}

void smtc_soft_dump(void)
{
        int i;

        printk("Counter Interrupts taken per CPU (TC)\n");
        for (i = 0; i < NR_CPUS; i++) {
                printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
        }
        printk("Self-IPI invocations:\n");
        for (i = 0; i < NR_CPUS; i++) {
                printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
        }
        smtc_ipi_qdump();
        printk("%d Recoveries of \"stolen\" FPU\n",
               atomic_read(&smtc_fpu_recoveries));
}


/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        unsigned long flags, mtflags, tcstat, prevhalt, asid;
        int tlb, i;

        /*
         * It would be nice to be able to use a spinlock here,
         * but this is invoked from within TLB flush routines
         * that protect themselves with DVPE, so if a lock is
         * held by another TC, it'll never be freed.
         *
         * DVPE/DMT must not be done with interrupts enabled,
         * so even so most callers will already have disabled
         * them, let's be really careful...
         */

        local_irq_save(flags);
        if (smtc_status & SMTC_TLB_SHARED) {
                mtflags = dvpe();
                tlb = 0;
        } else {
                mtflags = dmt();
                tlb = cpu_data[cpu].vpe_id;
        }
        asid = asid_cache(cpu);

        do {
                if (!((asid += ASID_INC) & ASID_MASK)) {
                        if (cpu_has_vtag_icache)
                                flush_icache_all();
                        /* Traverse all online CPUs (hack requires contiguous range) */
                        for_each_online_cpu(i) {
                                /*
                                 * We don't need to worry about our own CPU, nor those of
                                 * CPUs who don't share our TLB.
                                 */
                                if ((i != smp_processor_id()) &&
                                    ((smtc_status & SMTC_TLB_SHARED) ||
                                     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
                                        settc(cpu_data[i].tc_id);
                                        prevhalt = read_tc_c0_tchalt() & TCHALT_H;
                                        if (!prevhalt) {
                                                write_tc_c0_tchalt(TCHALT_H);
                                                mips_ihb();
                                        }
                                        tcstat = read_tc_c0_tcstatus();
                                        smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
                                        if (!prevhalt)
                                                write_tc_c0_tchalt(0);
                                }
                        }
                        if (!asid)              /* fix version if needed */
                                asid = ASID_FIRST_VERSION;
                        local_flush_tlb_all();  /* start new asid cycle */
                }
        } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

        /*
         * SMTC shares the TLB within VPEs and possibly across all VPEs.
         */
        for_each_online_cpu(i) {
                if ((smtc_status & SMTC_TLB_SHARED) ||
                    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
                        cpu_context(i, mm) = asid_cache(i) = asid;
        }

        if (smtc_status & SMTC_TLB_SHARED)
                evpe(mtflags);
        else
                emt(mtflags);
        local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
        int entry;
        unsigned long ehi;

        entry = read_c0_wired();

        /* Traverse all non-wired entries */
        while (entry < current_cpu_data.tlbsize) {
                write_c0_index(entry);
                ehb();
                tlb_read();
                ehb();
                ehi = read_c0_entryhi();
                if ((ehi & ASID_MASK) == asid) {
                        /*
                         * Invalidate only entries with specified ASID,
                         * making sure all entries differ.
                         */
                        write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                entry++;
        }
        write_c0_index(PARKED_INDEX);
        tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id()) {
                        settc(cpu_data[cpu].tc_id);
                        halt_state_save[cpu] = read_tc_c0_tchalt();
                        write_tc_c0_tchalt(TCHALT_H);
                }
        }
        mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
        int cpu;

        /*
         * Start with a hazard barrier to ensure
         * that all CACHE ops have played through.
         */
        mips_ihb();

        for_each_online_cpu(cpu) {
                if (cpu != smp_processor_id()) {
                        settc(cpu_data[cpu].tc_id);
                        write_tc_c0_tchalt(halt_state_save[cpu]);
                }
        }
        mips_ihb();
}