/*
 * native hashtable management.
 *
 * SMP scalability work:
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define HPTE_LOCK_BIT 3

DEFINE_RAW_SPINLOCK(native_tlbie_lock);
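
/*
 * Locking overview: HPTE_LOCK_BIT names a software-use bit in the
 * HPTE's first doubleword (1ul << 3 == 0x8), so an entry can be
 * pinned without clearing HPTE_V_VALID.  native_tlbie_lock serializes
 * global tlbies on hardware without MMU_FTR_LOCKLESS_TLBIE, which
 * cannot safely have two processors tlbie at once.  A sketch of the
 * per-entry pattern used throughout this file:
 *
 *	native_lock_hpte(hptep);	spin until HPTE_LOCK_BIT is ours
 *	...examine or rewrite hptep->v and hptep->r...
 *	native_unlock_hpte(hptep);	or store v with the bit clear
 */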

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non-SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page
			 * size.  We don't need all the bits, but the rest
			 * must be ignored by the processor.  vpn covers up
			 * to 65 bits of va (0...65) and we need bits
			 * 58..64 of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64-bit va, non-SLS segment
	 * Older versions of the architecture (2.02 and earlier) require
	 * the masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r" (va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		if (psize != apsize) {
			/*
			 * MPSS: 64K base page size and 16MB large page
			 * size.  We don't need all the bits, but the rest
			 * must be ignored by the processor.  vpn covers up
			 * to 65 bits of va (0...65) and we need bits
			 * 58..64 of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r" (va) : "memory");
		break;
	}
}

static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
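
/*
 * Note the two barrier sequences in tlbie(): a CPU-local tlbiel only
 * needs ptesync before and after, while a global tlbie must be
 * followed by eieio; tlbsync; ptesync so the invalidation is complete
 * on all processors before the caller proceeds (our paraphrase of the
 * architected sequence).
 */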

static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
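
/*
 * This is the classic test-and-test-and-set pattern: the atomic
 * test_and_set_bit_lock() is retried only after a plain test_bit()
 * spin sees the bit clear, keeping the cacheline shared while the
 * lock is contended; clear_bit_unlock() provides the matching release
 * barrier.
 */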

static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
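
/*
 * The return value packs the chosen slot into bits 0-2 and the
 * primary/secondary hash choice into bit 3, e.g. slot 5 of a
 * secondary-hash group comes back as 5 | (1 << 3) = 13; callers can
 * split it again with _PTEIDX_GROUP_IX and _PTEIDX_SECONDARY.
 */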

static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
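
/*
 * Note that hpte_remove() deliberately leaves the TLB alone: the
 * evicted translation may still be cached, which is why
 * native_hpte_updatepp() and native_hpte_invalidate() below tlbie
 * unconditionally, even when the entry they were aimed at is gone.
 */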

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the TLB too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}
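
/*
 * Slot arithmetic, spelled out: with HPTES_PER_GROUP == 8,
 * (hash & htab_hash_mask) picks a PTE group and multiplying by 8
 * gives the index of that group's first slot, so a bolted entry is
 * always found at group * 8 + (0..7) in the primary bucket.
 */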

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));
	/*
	 * Ensure it is out of the TLB too. A bolted entry's base and
	 * actual page size are always the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

static void native_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i;
	int lock_tlbie;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = hptep->v;

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
	}
	/*
	 * Since this is a hugepage, we just need a single tlbie.
	 * Use the last vpn.
	 */
	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	__tlbie(vpn, psize, actual_psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
}
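
/*
 * Worked sizing example (assuming a 16M hugepage on a 64K base page
 * kernel): shift is 16 and PMD_SHIFT is 24, so max_hpte_count is
 * 1U << 8 = 256, one hpte_slot_array entry per 64K subpage.  Every
 * valid entry costs an HPTE lookup, but the TLB is flushed with a
 * single 16M tlbie at the end.
 */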

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	     >= 8KB
		 *    rrrr rrzz	     >= 16KB
		 *    rrrr rzzz	     >= 32KB
		 *    rrrr zzzz	     >= 64KB
		 * ...
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
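
/*
 * Example decode (illustrative): a 64K actual page has shift 16, so
 * shift - LP_SHIFT = 4 significant LP bits ("rrrr zzzz" above); with
 * mask 0xf, (lp & 0xf) == mmu_psize_defs[psize].penc[MMU_PAGE_64K]
 * identifies a 64K actual page whatever the r bits contain.
 */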

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}
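
/*
 * How the 256M case recovers the VA (our reading of the code): the
 * AVPN holds VA bits 23 and up, so its low 5 bits are segment-offset
 * bits 23-27 and the remainder is the VSID.  For pages with
 * shift < 23 the missing offset bits are rebuilt by inverting the
 * hash function: hash = vsid ^ (seg_off >> shift), and pteg matches
 * the hash in its low bits, hence vpi = (vsid ^ pteg) &
 * htab_hash_mask.
 */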

/*
 * Clear all mappings on kexec.  All CPUs are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * We take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right?  And for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush: we batch the tlbies to avoid taking and
 * releasing the lock for every entry.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
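
/*
 * The batch buys two things (summary): all "number" HPTEs are knocked
 * out first under their per-entry locks, and then a single
 * ptesync ... eieio; tlbsync; ptesync sequence covers the whole run
 * of tlbies, instead of paying the full barrier cost per page as
 * tlbie() would.
 */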

void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}