/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>

/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

#define MIPS_CPU_IPI_IRQ 1

#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
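
/*
 * Note: the LOCK/UNLOCK macros above expand in place and rely on the
 * caller having declared local "flags" and "mtflags" variables, e.g.:
 *
 *	long flags;
 *	int mtflags;
 *	...
 *	LOCK_MT_PRA();
 *	... touch per-TC/per-VPE state ...
 *	UNLOCK_MT_PRA();
 */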

/*
 * Data structures purely associated with SMTC parallelism
 */


/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of Interprocessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;


/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpelimit = 0;
static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);
	return 1;
}

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);
	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("maxvpes=", maxvpes);
__setup("maxtcs=", maxtcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
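
/*
 * Illustrative (not from the original source) kernel command line using
 * the overrides registered above, e.g. to cap the configuration at one
 * VPE with four TCs, 32 IPI buffers, no shared TLB, and a 6-bit ASID mask:
 *
 *	maxvpes=1 maxtcs=4 ipibufs=32 nostlb asidmask=0x3f
 */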

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}


__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}


/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
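					/*
					 * (Config.MT, bits 9:7, equal to 1 means a standard
					 * TLB; Config1 bits 30:25 hold the MMU size minus
					 * one, hence the "+ 1" below.)
					 */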
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */

int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TCs is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
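	/*
	 * Clear TKSU (so the TC starts in kernel mode), DA, and IXMT
	 * (interrupt exempt) in TCStatus, and set A to activate the TC.
	 */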
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}


void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions ();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
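	/*
	 * Example (illustrative): with ntc = 5 TCs and nvpe = 2 VPEs,
	 * tcpervpe = 2 and slop = 1, so the loop below gives VPE 0
	 * three TCs and VPE 1 two TCs.
	 */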

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code.  Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts();

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}


/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 *
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)idle->thread_info);

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and in ascending order
	 * across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)){
		write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code.  Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
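/*
 * atomic_postincrement() below returns the value *pv held before the
 * increment (the LL result), i.e. the semantics of "(*pv)++".
 */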
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"
	"	addu	%1, %0, 1	\n"
	"	sc	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	"	sync			\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
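			/*
			 * A nonzero previous latch value means a clock-tick
			 * IPI is already queued for this CPU, so this one can
			 * be dropped and its buffer returned to the free queue.
			 */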
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
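	/*
	 * (The high-bit test checks that TCRestart is a kernel-segment
	 * address that can safely be dereferenced; 0x42000020 under the
	 * 0xfe00003f mask is the encoding of the MIPS WAIT instruction.)
	 */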
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}


static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
	/* DEBUG */
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}

/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(int vpe)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi;

static void setup_cross_vpe_interrupts(void)
{
	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	irq_ipi.handler = ipi_interrupt;
	irq_ipi.flags = IRQF_DISABLED;
	irq_ipi.name = "SMTC_IPI";

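	/*
	 * The hwmask passed here is the Status.IM bit for the IPI
	 * interrupt (0x100 << 1 == IM[1]); setup_irq_smtc() records
	 * it in irq_hwmask[] for do_IRQ's mask manipulation.
	 */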
	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

void smtc_ipi_replay(void)
{
	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs.  This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK:  If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[smp_processor_id()].depth > 0) {
		struct smtc_ipi *pipi;
		extern void self_ipi(struct smtc_ipi *);

		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
			self_ipi(pipi);
			smtc_cpu_stats[smp_processor_id()].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);

void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */

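	/*
	 * One-time initialization race: the first caller to bump the
	 * counter from 0 to 1 does the setup below, then publishes
	 * completion by setting the counter to 1000; any other caller
	 * arriving in the meantime spins until that value appears.
	 */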
	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = cpu_data[smp_processor_id()].vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	smtc_ipi_replay();
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
		atomic_read(&smtc_fpu_recoveries));
}


/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

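	/*
	 * Walk the ASID space until we find a value not still live on some
	 * other TC sharing this TLB.  On a generation rollover, snapshot
	 * which ASIDs other TCs are currently running with (smtc_live_asid)
	 * so those values are not handed out again while still in use.
	 */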
	do {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all.  This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}