// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Modified RID allocation for SMP
 *	IPI based ptc implementation and A-step IPI implementation.
 *
 * Copyright (C) 2007 Intel Corp
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on the current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* highest slot number used by the kernel */
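
/*
 * Per-cpu TR bookkeeping: each array holds 2 * IA64_TR_ALLOC_MAX entries,
 * the first half tracking inserted itr mappings and the second half dtr
 * mappings (see ia64_itr_entry() and ia64_ptr_entry() below).
 */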
struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has setup ia64_ctx.max_ctx based on the
 * maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	if (!ia64_ctx.bitmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
	if (!ia64_ctx.flushmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
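
/*
 * Note: cpus flagged in ia64_need_tlb_flush above are not sent an IPI;
 * they are expected to notice the flag and flush their local TLB lazily,
 * the next time they pick up a new context.
 */
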
/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}
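
/*
 * A minimal usage sketch: admit at most N cpus into a region at once,
 * busy-waiting (never sleeping) when the region is full:
 *
 *	static struct spinaphore sem;
 *
 *	spinaphore_init(&sem, N);
 *	...
 *	down_spin(&sem);	// takes ticket t, spins until serve > t
 *	do_rate_limited_work();
 *	up_spin(&sem);		// bumps serve, admitting the next waiter
 */
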
static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the max number of concurrent global TLB
 * purges, which is otherwise reported by either PAL or the SAL PALO table.
 *
 * There is no sanity checking of the nptcg value. It is the user's
 * responsibility to supply a valid nptcg value for the platform; otherwise,
 * the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
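
/*
 * For example, booting with "nptcg=2" caps the number of simultaneous
 * ptc.g/ptc.ga purges at two, regardless of what PAL or PALO report.
 */
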
/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of simultaneous
 * ptc.g purges defined in either PAL_VM_SUMMARY or the PAL override table;
 * in that case, we should ignore the value from both.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully setup until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
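
/*
 * Hypothetical worked example: on a 16-cpu machine where every cpu's
 * PAL_VM_SUMMARY reports 2 purges, nptcg settles at 2, need_ptcg_sem is
 * set (16 > 2), and ptcg_sem is initialized to admit 2 cpus at a time.
 */
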
#ifdef CONFIG_SMP
static void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
#endif /* CONFIG_SMP */

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];
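
	/*
	 * Walk the ptc.e address pattern described by PAL_PTCE_INFO:
	 * count0 outer steps of stride0, each containing count1 inner
	 * steps of stride1, purges the entire local TLB.
	 */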
	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);
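
	/*
	 * Illustrative example: for a 16KB range, size + 0xfff = 0x4fff,
	 * so nbits starts at 14 (a 16KB purge granule); if the cpu cannot
	 * purge at 16KB granularity, nbits is bumped to the next page size
	 * in purge.mask, and start is rounded down to that granule.
	 */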
	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		ia64_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif

	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if (unlikely(end - start >= 1024*1024*1024*1024UL
			|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a terabyte or across regions, we're
		 * probably better off just flushing the entire TLB(s).  This
		 * should be very rare and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/* flush the address range from the tlb */
		__flush_tlb_range(vma, start, end);
		/* flush the virt. page-table area mapping the addr range */
		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
	}
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1; /* 1UL: log_size may be >= 32 */

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
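
/*
 * Two ranges [va, va_end] and [p->ifa, tr_end] overlap exactly when
 * neither starts past the other's end; e.g. (same RID) a new 16KB
 * mapping at 0x4000 overlaps an existing 16KB TR at 0x6000, but not
 * one at 0x8000.
 */
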
/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entries inserted.
 * log_size : range to be covered.
 *
 * Return value:
 *	< 0  : error number.
 *	>= 0 : slot number allocated for TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
						"Inserted for TR Register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
						"Inserted for TR Register!!\n");
					goto out;
				}
		}
	}

	/* Find a free slot of the requested kind(s) */
	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 0x1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 0x2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 0x3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
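
/*
 * A minimal usage sketch (hypothetical caller; the pte value and the
 * 16MB size are illustrative, use_the_pinned_mapping() is a placeholder,
 * error handling elided):
 *
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x2, va, pte, 24);   // pin va in a dtr, 2^24B
 *	if (slot >= 0) {
 *		use_the_pinned_mapping();
 *		ia64_ptr_entry(0x2, slot);         // release the slot again
 *	}
 *	preempt_enable();
 */
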
/*
 * ia64_purge_tr
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	/* Lower ia64_tr_used if the freed slot was the highest in use */
	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);