arch/x86/xen/mmu.c (Linux 3.9-rc6)
3b827c1b
JF
1/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfn and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
40 */
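/*
 * A simplified sketch of the conversion described above (identity and
 * foreign-frame handling omitted, and the sketch_* name is illustrative
 * only); the real implementations are pte_pfn_to_mfn() and
 * pte_mfn_to_pfn() further down in this file:
 */
#if 0
static pteval_t sketch_pte_pfn_to_mfn(pteval_t val)
{
	unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
	pteval_t flags = val & PTE_FLAGS_MASK;
	unsigned long mfn = get_phys_to_machine(pfn);	/* p2m lookup */

	return ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
#endif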
f120f13e 41#include <linux/sched.h>
f4f97b3e 42#include <linux/highmem.h>
994025ca 43#include <linux/debugfs.h>
3b827c1b 44#include <linux/bug.h>
d2cb2145 45#include <linux/vmalloc.h>
44408ad7 46#include <linux/module.h>
5a0e3ad6 47#include <linux/gfp.h>
a9ce6bc1 48#include <linux/memblock.h>
2222e71b 49#include <linux/seq_file.h>
34b6f01a 50#include <linux/crash_dump.h>
3b827c1b 51
84708807
JF
52#include <trace/events/xen.h>
53
3b827c1b
JF
54#include <asm/pgtable.h>
55#include <asm/tlbflush.h>
5deb30d1 56#include <asm/fixmap.h>
3b827c1b 57#include <asm/mmu_context.h>
319f3ba5 58#include <asm/setup.h>
f4f97b3e 59#include <asm/paravirt.h>
7347b408 60#include <asm/e820.h>
cbcd79c2 61#include <asm/linkage.h>
08bbc9da 62#include <asm/page.h>
fef5ba79 63#include <asm/init.h>
41f2e477 64#include <asm/pat.h>
900cba88 65#include <asm/smp.h>
3b827c1b
JF
66
67#include <asm/xen/hypercall.h>
f4f97b3e 68#include <asm/xen/hypervisor.h>
3b827c1b 69
c0011dbf 70#include <xen/xen.h>
3b827c1b
JF
71#include <xen/page.h>
72#include <xen/interface/xen.h>
59151001 73#include <xen/interface/hvm/hvm_op.h>
319f3ba5 74#include <xen/interface/version.h>
c0011dbf 75#include <xen/interface/memory.h>
319f3ba5 76#include <xen/hvc-console.h>
3b827c1b 77
f4f97b3e 78#include "multicalls.h"
3b827c1b 79#include "mmu.h"
994025ca
JF
80#include "debugfs.h"
81
19001c8c
AN
82/*
83 * Protects atomic reservation decrease/increase against concurrent increases.
06f521d5 84 * Also protects non-atomic updates of current_pages and balloon lists.
19001c8c
AN
85 */
86DEFINE_SPINLOCK(xen_reservation_lock);
87
caaf9ecf 88#ifdef CONFIG_X86_32
319f3ba5
JF
89/*
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to provide the page table pages needed to map the rest.
92 * Each page can map 2MB.
93 */
764f0138
JF
94#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
caaf9ecf 96#endif
319f3ba5
JF
97#ifdef CONFIG_X86_64
98/* l3 pud for userspace vsyscall mapping */
99static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100#endif /* CONFIG_X86_64 */
101
102/*
103 * Note about cr3 (pagetable base) values:
104 *
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may be lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
110 *
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
115 */
116DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
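/*
 * Illustrative note: code inspecting a *different* vcpu's pagetable base
 * should test xen_current_cr3, e.g.
 *	per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)
 * as xen_drop_mm_ref() does below, while a vcpu reading its own logical
 * value uses this_cpu_read(xen_cr3) (see xen_read_cr3()).
 */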
118
119
d6182fbf
JF
120/*
121 * Just beyond the highest usermode address. STACK_TOP_MAX has a
122 * redzone above it, so round it up to a PGD boundary.
123 */
124#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
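/*
 * Illustrative note on the rounding above: adding PGDIR_SIZE - 1 and then
 * masking with PGDIR_MASK rounds an address up to the next PGD boundary
 * (addresses already on a boundary are unchanged); e.g. assuming a 4MB
 * PGDIR_SIZE, 0x00500000 rounds up to 0x00800000.
 */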
125
9976b39b
JF
126unsigned long arbitrary_virt_to_mfn(void *vaddr)
127{
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129
130 return PFN_DOWN(maddr.maddr);
131}
132
ce803e70 133xmaddr_t arbitrary_virt_to_machine(void *vaddr)
3b827c1b 134{
ce803e70 135 unsigned long address = (unsigned long)vaddr;
da7bfc50 136 unsigned int level;
9f32d21c
CL
137 pte_t *pte;
138 unsigned offset;
3b827c1b 139
9f32d21c
CL
140 /*
141 * if the address is in the linear mapped vaddr range, we can just use
142 * the (quick) virt_to_machine() p2m lookup
143 */
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
146
147 /* otherwise we have to do a (slower) full page-table walk */
3b827c1b 148
9f32d21c
CL
149 pte = lookup_address(address, &level);
150 BUG_ON(pte == NULL);
151 offset = address & ~PAGE_MASK;
ebd879e3 152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
3b827c1b 153}
de23be5f 154EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
3b827c1b
JF
155
156void make_lowmem_page_readonly(void *vaddr)
157{
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
da7bfc50 160 unsigned int level;
3b827c1b 161
f0646e43 162 pte = lookup_address(address, &level);
fef5ba79
JF
163 if (pte == NULL)
164 return; /* vaddr missing */
3b827c1b
JF
165
166 ptev = pte_wrprotect(*pte);
167
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
170}
171
172void make_lowmem_page_readwrite(void *vaddr)
173{
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
da7bfc50 176 unsigned int level;
3b827c1b 177
f0646e43 178 pte = lookup_address(address, &level);
fef5ba79
JF
179 if (pte == NULL)
180 return; /* vaddr missing */
3b827c1b
JF
181
182 ptev = pte_mkwrite(*pte);
183
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
186}
187
188
7708ad64 189static bool xen_page_pinned(void *ptr)
e2426cf8
JF
190{
191 struct page *page = virt_to_page(ptr);
192
193 return PagePinned(page);
194}
195
eba3ff8b 196void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
c0011dbf
JF
197{
198 struct multicall_space mcs;
199 struct mmu_update *u;
200
84708807
JF
201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202
c0011dbf
JF
203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
205
206 /* ptep might be kmapped when using 32-bit HIGHPTE */
d5108316 207 u->ptr = virt_to_machine(ptep).maddr;
c0011dbf
JF
208 u->val = pte_val_ma(pteval);
209
eba3ff8b 210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
c0011dbf
JF
211
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
213}
eba3ff8b
JF
214EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215
7708ad64 216static void xen_extend_mmu_update(const struct mmu_update *update)
3b827c1b 217{
d66bf8fc
JF
218 struct multicall_space mcs;
219 struct mmu_update *u;
3b827c1b 220
400d3494
JF
221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222
994025ca 223 if (mcs.mc != NULL) {
400d3494 224 mcs.mc->args[1]++;
994025ca 225 } else {
400d3494
JF
226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 }
d66bf8fc 229
d66bf8fc 230 u = mcs.args;
400d3494
JF
231 *u = *update;
232}
233
dcf7435c
JF
234static void xen_extend_mmuext_op(const struct mmuext_op *op)
235{
236 struct multicall_space mcs;
237 struct mmuext_op *u;
238
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 }
247
248 u = mcs.args;
249 *u = *op;
250}
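/*
 * A minimal sketch of how the two extend helpers above are meant to be
 * used - inside an open multicall batch, exactly as xen_set_pmd_hyper()
 * and xen_do_pin() below do (the sketch_* name is illustrative only):
 */
#if 0
static void sketch_queue_pmd_update(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	xen_mc_batch();
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);
	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif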
251
4c13629f 252static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
400d3494
JF
253{
254 struct mmu_update u;
255
256 preempt_disable();
257
258 xen_mc_batch();
259
ce803e70
JF
260 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
400d3494 262 u.val = pmd_val_ma(val);
7708ad64 263 xen_extend_mmu_update(&u);
d66bf8fc
JF
264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 preempt_enable();
3b827c1b
JF
268}
269
4c13629f 270static void xen_set_pmd(pmd_t *ptr, pmd_t val)
e2426cf8 271{
84708807
JF
272 trace_xen_mmu_set_pmd(ptr, val);
273
e2426cf8
JF
274 /* If page is not pinned, we can just update the entry
275 directly */
7708ad64 276 if (!xen_page_pinned(ptr)) {
e2426cf8
JF
277 *ptr = val;
278 return;
279 }
280
281 xen_set_pmd_hyper(ptr, val);
282}
283
3b827c1b
JF
284/*
285 * Associate a virtual page frame with a given physical page frame
286 * and protection flags for that frame.
287 */
288void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289{
836fe2f2 290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
3b827c1b
JF
291}
292
4a35c13c 293static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
3b827c1b 294{
4a35c13c 295 struct mmu_update u;
c0011dbf 296
4a35c13c
JF
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
994025ca 299
4a35c13c 300 xen_mc_batch();
d66bf8fc 301
4a35c13c
JF
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
a99ac5e8 305
4a35c13c 306 xen_mc_issue(PARAVIRT_LAZY_MMU);
2bd50036 307
4a35c13c
JF
308 return true;
309}
310
84708807 311static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
4a35c13c 312{
d095d43e
DV
313 if (!xen_batched_set_pte(ptep, pteval)) {
314 /*
315 * Could call native_set_pte() here and trap and
316 * emulate the PTE write but with 32-bit guests this
317 * needs two traps (one for each of the two 32-bit
318 * words in the PTE) so do one hypercall directly
319 * instead.
320 */
321 struct mmu_update u;
322
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
326 }
3b827c1b
JF
327}
328
84708807
JF
329static void xen_set_pte(pte_t *ptep, pte_t pteval)
330{
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
333}
334
4c13629f 335static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
4a35c13c
JF
336 pte_t *ptep, pte_t pteval)
337{
84708807
JF
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
3b827c1b
JF
340}
341
f63c2f24
T
342pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
947a69c9 344{
e57778a1 345 /* Just return the pte as-is. We preserve the bits on commit */
84708807 346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
e57778a1
JF
347 return *ptep;
348}
349
350void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
352{
400d3494 353 struct mmu_update u;
e57778a1 354
84708807 355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
400d3494 356 xen_mc_batch();
947a69c9 357
d5108316 358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
400d3494 359 u.val = pte_val_ma(pte);
7708ad64 360 xen_extend_mmu_update(&u);
947a69c9 361
e57778a1 362 xen_mc_issue(PARAVIRT_LAZY_MMU);
947a69c9
JF
363}
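/*
 * These two hooks implement the generic start/commit protocol; a caller
 * (sketched, simplified) looks roughly like:
 *
 *	oldpte = ptep_modify_prot_start(mm, addr, ptep);
 *	newpte = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, newpte);
 *
 * which lets the Xen variant preserve A/D bits by doing the final write
 * with MMU_PT_UPDATE_PRESERVE_AD.
 */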
364
ebb9cfe2
JF
365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val)
947a69c9 367{
ebb9cfe2 368 if (val & _PAGE_PRESENT) {
59438c9f 369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
b7e5ffe5
KRW
370 unsigned long pfn = mfn_to_pfn(mfn);
371
77be1fab 372 pteval_t flags = val & PTE_FLAGS_MASK;
b7e5ffe5
KRW
373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
ebb9cfe2 377 }
947a69c9 378
ebb9cfe2 379 return val;
947a69c9
JF
380}
381
ebb9cfe2 382static pteval_t pte_pfn_to_mfn(pteval_t val)
947a69c9 383{
ebb9cfe2 384 if (val & _PAGE_PRESENT) {
59438c9f 385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
77be1fab 386 pteval_t flags = val & PTE_FLAGS_MASK;
fb38923e 387 unsigned long mfn;
cfd8951e 388
fb38923e
KRW
389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
391 else
392 mfn = pfn;
cfd8951e
JF
393 /*
394 * If there's no mfn for the pfn, then just create an
395 * empty non-present pte. Unfortunately this loses
396 * information about the original pfn, so
397 * pte_mfn_to_pfn is asymmetric.
398 */
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
fb38923e
KRW
402 } else {
403 /*
404 * Paramount to do this test _after_ the
405 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
406 * IDENTITY_FRAME_BIT resolves to true.
407 */
408 mfn &= ~FOREIGN_FRAME_BIT;
409 if (mfn & IDENTITY_FRAME_BIT) {
410 mfn &= ~IDENTITY_FRAME_BIT;
411 flags |= _PAGE_IOMAP;
412 }
cfd8951e 413 }
cfd8951e 414 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
947a69c9
JF
415 }
416
ebb9cfe2 417 return val;
947a69c9
JF
418}
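/*
 * Illustrative consequence of the asymmetry noted above: a present pfn
 * with no mfn (INVALID_P2M_ENTRY) becomes an empty, non-present pte, so
 * converting that pte back with pte_mfn_to_pfn() yields 0 rather than
 * the original pfn.
 */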
419
c0011dbf
JF
420static pteval_t iomap_pte(pteval_t val)
421{
422 if (val & _PAGE_PRESENT) {
423 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
424 pteval_t flags = val & PTE_FLAGS_MASK;
425
426 /* We assume the pte frame number is an MFN, so
427 just use it as-is. */
428 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
429 }
430
431 return val;
432}
433
4c13629f 434static pteval_t xen_pte_val(pte_t pte)
947a69c9 435{
41f2e477 436 pteval_t pteval = pte.pte;
8eaffa67 437#if 0
41f2e477
JF
438 /* If this is a WC pte, convert back from Xen WC to Linux WC */
439 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
440 WARN_ON(!pat_enabled);
441 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
442 }
8eaffa67 443#endif
41f2e477
JF
444 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
445 return pteval;
446
447 return pte_mfn_to_pfn(pteval);
947a69c9 448}
da5de7c2 449PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
947a69c9 450
4c13629f 451static pgdval_t xen_pgd_val(pgd_t pgd)
947a69c9 452{
ebb9cfe2 453 return pte_mfn_to_pfn(pgd.pgd);
947a69c9 454}
da5de7c2 455PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
947a69c9 456
41f2e477
JF
457/*
458 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
459 * are reserved for now, to correspond to the Intel-reserved PAT
460 * types.
461 *
462 * We expect Linux's PAT to be set as follows:
463 *
464 * Idx PTE flags Linux Xen Default
465 * 0 WB WB WB
466 * 1 PWT WC WT WT
467 * 2 PCD UC- UC- UC-
468 * 3 PCD PWT UC UC UC
469 * 4 PAT WB WC WB
470 * 5 PAT PWT WC WP WT
471 * 6 PAT PCD UC- UC UC-
472 * 7 PAT PCD PWT UC UC UC
473 */
474
475void xen_set_pat(u64 pat)
476{
477 /* We expect Linux to use a PAT setting of
478 * UC UC- WC WB (ignoring the PAT flag) */
479 WARN_ON(pat != 0x0007010600070106ull);
480}
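/*
 * Working out the constant above (illustrative): the PAT index of a pte
 * is (_PAGE_PAT ? 4 : 0) | (_PAGE_PCD ? 2 : 0) | (_PAGE_PWT ? 1 : 0), and
 * byte N of the MSR value gives the memory type for index N
 * (0x00 = UC, 0x01 = WC, 0x06 = WB, 0x07 = UC-).  0x0007010600070106
 * therefore decodes, from index 0 upwards, to WB WC UC- UC WB WC UC- UC,
 * matching the "Linux" column of the table above.
 */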
481
4c13629f 482static pte_t xen_make_pte(pteval_t pte)
947a69c9 483{
7347b408 484 phys_addr_t addr = (pte & PTE_PFN_MASK);
8eaffa67 485#if 0
41f2e477
JF
486 /* If Linux is trying to set a WC pte, then map to the Xen WC.
487 * If _PAGE_PAT is set, then it probably means it is really
488 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
489 * things work out OK...
490 *
491 * (We should never see kernel mappings with _PAGE_PSE set,
492 * but we could see hugetlbfs mappings, I think.).
493 */
494 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
495 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
496 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
497 }
8eaffa67 498#endif
7347b408
AN
499 /*
500 * Unprivileged domains are allowed to do IOMAPpings for
501 * PCI passthrough, but not map ISA space. The ISA
502 * mappings are just dummy local mappings to keep other
503 * parts of the kernel happy.
504 */
505 if (unlikely(pte & _PAGE_IOMAP) &&
506 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
c0011dbf 507 pte = iomap_pte(pte);
7347b408
AN
508 } else {
509 pte &= ~_PAGE_IOMAP;
c0011dbf 510 pte = pte_pfn_to_mfn(pte);
7347b408 511 }
c0011dbf 512
ebb9cfe2 513 return native_make_pte(pte);
947a69c9 514}
da5de7c2 515PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
947a69c9 516
4c13629f 517static pgd_t xen_make_pgd(pgdval_t pgd)
947a69c9 518{
ebb9cfe2
JF
519 pgd = pte_pfn_to_mfn(pgd);
520 return native_make_pgd(pgd);
947a69c9 521}
da5de7c2 522PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
947a69c9 523
4c13629f 524static pmdval_t xen_pmd_val(pmd_t pmd)
947a69c9 525{
ebb9cfe2 526 return pte_mfn_to_pfn(pmd.pmd);
947a69c9 527}
da5de7c2 528PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
28499143 529
4c13629f 530static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
f4f97b3e 531{
400d3494 532 struct mmu_update u;
f4f97b3e 533
d66bf8fc
JF
534 preempt_disable();
535
400d3494
JF
536 xen_mc_batch();
537
ce803e70
JF
538 /* ptr may be ioremapped for 64-bit pagetable setup */
539 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
400d3494 540 u.val = pud_val_ma(val);
7708ad64 541 xen_extend_mmu_update(&u);
d66bf8fc
JF
542
543 xen_mc_issue(PARAVIRT_LAZY_MMU);
544
545 preempt_enable();
f4f97b3e
JF
546}
547
4c13629f 548static void xen_set_pud(pud_t *ptr, pud_t val)
e2426cf8 549{
84708807
JF
550 trace_xen_mmu_set_pud(ptr, val);
551
e2426cf8
JF
552 /* If page is not pinned, we can just update the entry
553 directly */
7708ad64 554 if (!xen_page_pinned(ptr)) {
e2426cf8
JF
555 *ptr = val;
556 return;
557 }
558
559 xen_set_pud_hyper(ptr, val);
560}
561
f6e58732 562#ifdef CONFIG_X86_PAE
4c13629f 563static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
3b827c1b 564{
84708807 565 trace_xen_mmu_set_pte_atomic(ptep, pte);
f6e58732 566 set_64bit((u64 *)ptep, native_pte_val(pte));
3b827c1b
JF
567}
568
4c13629f 569static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
3b827c1b 570{
84708807 571 trace_xen_mmu_pte_clear(mm, addr, ptep);
4a35c13c
JF
572 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
573 native_pte_clear(mm, addr, ptep);
3b827c1b
JF
574}
575
4c13629f 576static void xen_pmd_clear(pmd_t *pmdp)
3b827c1b 577{
84708807 578 trace_xen_mmu_pmd_clear(pmdp);
e2426cf8 579 set_pmd(pmdp, __pmd(0));
3b827c1b 580}
f6e58732 581#endif /* CONFIG_X86_PAE */
3b827c1b 582
4c13629f 583static pmd_t xen_make_pmd(pmdval_t pmd)
3b827c1b 584{
ebb9cfe2 585 pmd = pte_pfn_to_mfn(pmd);
947a69c9 586 return native_make_pmd(pmd);
3b827c1b 587}
da5de7c2 588PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
3b827c1b 589
f6e58732 590#if PAGETABLE_LEVELS == 4
4c13629f 591static pudval_t xen_pud_val(pud_t pud)
f6e58732
JF
592{
593 return pte_mfn_to_pfn(pud.pud);
594}
da5de7c2 595PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
f6e58732 596
4c13629f 597static pud_t xen_make_pud(pudval_t pud)
f6e58732
JF
598{
599 pud = pte_pfn_to_mfn(pud);
600
601 return native_make_pud(pud);
602}
da5de7c2 603PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
f6e58732 604
4c13629f 605static pgd_t *xen_get_user_pgd(pgd_t *pgd)
f6e58732 606{
d6182fbf
JF
607 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
608 unsigned offset = pgd - pgd_page;
609 pgd_t *user_ptr = NULL;
f6e58732 610
d6182fbf
JF
611 if (offset < pgd_index(USER_LIMIT)) {
612 struct page *page = virt_to_page(pgd_page);
613 user_ptr = (pgd_t *)page->private;
614 if (user_ptr)
615 user_ptr += offset;
616 }
f6e58732 617
d6182fbf
JF
618 return user_ptr;
619}
620
621static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
622{
623 struct mmu_update u;
f6e58732
JF
624
625 u.ptr = virt_to_machine(ptr).maddr;
626 u.val = pgd_val_ma(val);
7708ad64 627 xen_extend_mmu_update(&u);
d6182fbf
JF
628}
629
630/*
631 * Raw hypercall-based set_pgd, intended for use in early boot before
632 * there's a page structure. This implies:
633 * 1. The only existing pagetable is the kernel's
634 * 2. It is always pinned
635 * 3. It has no user pagetable attached to it
636 */
4c13629f 637static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
d6182fbf
JF
638{
639 preempt_disable();
640
641 xen_mc_batch();
642
643 __xen_set_pgd_hyper(ptr, val);
f6e58732
JF
644
645 xen_mc_issue(PARAVIRT_LAZY_MMU);
646
647 preempt_enable();
648}
649
4c13629f 650static void xen_set_pgd(pgd_t *ptr, pgd_t val)
f6e58732 651{
d6182fbf
JF
652 pgd_t *user_ptr = xen_get_user_pgd(ptr);
653
84708807
JF
654 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
655
f6e58732
JF
656 /* If page is not pinned, we can just update the entry
657 directly */
7708ad64 658 if (!xen_page_pinned(ptr)) {
f6e58732 659 *ptr = val;
d6182fbf 660 if (user_ptr) {
7708ad64 661 WARN_ON(xen_page_pinned(user_ptr));
d6182fbf
JF
662 *user_ptr = val;
663 }
f6e58732
JF
664 return;
665 }
666
d6182fbf
JF
667 /* If it's pinned, then we can at least batch the kernel and
668 user updates together. */
669 xen_mc_batch();
670
671 __xen_set_pgd_hyper(ptr, val);
672 if (user_ptr)
673 __xen_set_pgd_hyper(user_ptr, val);
674
675 xen_mc_issue(PARAVIRT_LAZY_MMU);
f6e58732
JF
676}
677#endif /* PAGETABLE_LEVELS == 4 */
678
f4f97b3e 679/*
5deb30d1
JF
680 * (Yet another) pagetable walker. This one is intended for pinning a
681 * pagetable. This means that it walks a pagetable and calls the
682 * callback function on each page it finds making up the page table,
683 * at every level. It walks the entire pagetable, but it only bothers
684 * pinning pte pages which are below limit. In the normal case this
685 * will be STACK_TOP_MAX, but at boot we need to pin up to
686 * FIXADDR_TOP.
687 *
688 * For 32-bit the important bit is that we don't pin beyond there,
689 * because then we start getting into Xen's ptes.
690 *
691 * For 64-bit, we must skip the Xen hole in the middle of the address
692 * space, just after the big x86-64 virtual hole.
693 */
86bbc2c2
IC
694static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
695 int (*func)(struct mm_struct *mm, struct page *,
696 enum pt_level),
697 unsigned long limit)
3b827c1b 698{
f4f97b3e 699 int flush = 0;
5deb30d1
JF
700 unsigned hole_low, hole_high;
701 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
702 unsigned pgdidx, pudidx, pmdidx;
f4f97b3e 703
5deb30d1
JF
704 /* The limit is the last byte to be touched */
705 limit--;
706 BUG_ON(limit >= FIXADDR_TOP);
3b827c1b
JF
707
708 if (xen_feature(XENFEAT_auto_translated_physmap))
f4f97b3e
JF
709 return 0;
710
5deb30d1
JF
711 /*
712 * 64-bit has a great big hole in the middle of the address
713 * space, which contains the Xen mappings. On 32-bit these
714 * will end up making a zero-sized hole and so is a no-op.
715 */
d6182fbf 716 hole_low = pgd_index(USER_LIMIT);
5deb30d1
JF
717 hole_high = pgd_index(PAGE_OFFSET);
718
719 pgdidx_limit = pgd_index(limit);
720#if PTRS_PER_PUD > 1
721 pudidx_limit = pud_index(limit);
722#else
723 pudidx_limit = 0;
724#endif
725#if PTRS_PER_PMD > 1
726 pmdidx_limit = pmd_index(limit);
727#else
728 pmdidx_limit = 0;
729#endif
730
5deb30d1 731 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
f4f97b3e 732 pud_t *pud;
3b827c1b 733
5deb30d1
JF
734 if (pgdidx >= hole_low && pgdidx < hole_high)
735 continue;
f4f97b3e 736
5deb30d1 737 if (!pgd_val(pgd[pgdidx]))
3b827c1b 738 continue;
f4f97b3e 739
5deb30d1 740 pud = pud_offset(&pgd[pgdidx], 0);
3b827c1b
JF
741
742 if (PTRS_PER_PUD > 1) /* not folded */
eefb47f6 743 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
f4f97b3e 744
5deb30d1 745 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
f4f97b3e 746 pmd_t *pmd;
f4f97b3e 747
5deb30d1
JF
748 if (pgdidx == pgdidx_limit &&
749 pudidx > pudidx_limit)
750 goto out;
3b827c1b 751
5deb30d1 752 if (pud_none(pud[pudidx]))
3b827c1b 753 continue;
f4f97b3e 754
5deb30d1 755 pmd = pmd_offset(&pud[pudidx], 0);
3b827c1b
JF
756
757 if (PTRS_PER_PMD > 1) /* not folded */
eefb47f6 758 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
f4f97b3e 759
5deb30d1
JF
760 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
761 struct page *pte;
762
763 if (pgdidx == pgdidx_limit &&
764 pudidx == pudidx_limit &&
765 pmdidx > pmdidx_limit)
766 goto out;
3b827c1b 767
5deb30d1 768 if (pmd_none(pmd[pmdidx]))
3b827c1b
JF
769 continue;
770
5deb30d1 771 pte = pmd_page(pmd[pmdidx]);
eefb47f6 772 flush |= (*func)(mm, pte, PT_PTE);
3b827c1b
JF
773 }
774 }
775 }
11ad93e5 776
5deb30d1 777out:
11ad93e5
JF
778 /* Do the top level last, so that the callbacks can use it as
779 a cue to do final things like tlb flushes. */
eefb47f6 780 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
f4f97b3e
JF
781
782 return flush;
3b827c1b
JF
783}
784
86bbc2c2
IC
785static int xen_pgd_walk(struct mm_struct *mm,
786 int (*func)(struct mm_struct *mm, struct page *,
787 enum pt_level),
788 unsigned long limit)
789{
790 return __xen_pgd_walk(mm, mm->pgd, func, limit);
791}
792
7708ad64
JF
793/* If we're using split pte locks, then take the page's lock and
794 return a pointer to it. Otherwise return NULL. */
eefb47f6 795static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
74260714
JF
796{
797 spinlock_t *ptl = NULL;
798
f7d0b926 799#if USE_SPLIT_PTLOCKS
74260714 800 ptl = __pte_lockptr(page);
eefb47f6 801 spin_lock_nest_lock(ptl, &mm->page_table_lock);
74260714
JF
802#endif
803
804 return ptl;
805}
806
7708ad64 807static void xen_pte_unlock(void *v)
74260714
JF
808{
809 spinlock_t *ptl = v;
810 spin_unlock(ptl);
811}
812
813static void xen_do_pin(unsigned level, unsigned long pfn)
814{
dcf7435c 815 struct mmuext_op op;
74260714 816
dcf7435c
JF
817 op.cmd = level;
818 op.arg1.mfn = pfn_to_mfn(pfn);
819
820 xen_extend_mmuext_op(&op);
74260714
JF
821}
822
eefb47f6
JF
823static int xen_pin_page(struct mm_struct *mm, struct page *page,
824 enum pt_level level)
f4f97b3e 825{
d60cd46b 826 unsigned pgfl = TestSetPagePinned(page);
f4f97b3e
JF
827 int flush;
828
829 if (pgfl)
830 flush = 0; /* already pinned */
831 else if (PageHighMem(page))
832 /* kmaps need flushing if we found an unpinned
833 highpage */
834 flush = 1;
835 else {
836 void *pt = lowmem_page_address(page);
837 unsigned long pfn = page_to_pfn(page);
838 struct multicall_space mcs = __xen_mc_entry(0);
74260714 839 spinlock_t *ptl;
f4f97b3e
JF
840
841 flush = 0;
842
11ad93e5
JF
843 /*
844 * We need to hold the pagetable lock between the time
845 * we make the pagetable RO and when we actually pin
846 * it. If we don't, then other users may come in and
847 * attempt to update the pagetable by writing it,
848 * which will fail because the memory is RO but not
849 * pinned, so Xen won't do the trap'n'emulate.
850 *
851 * If we're using split pte locks, we can't hold the
852 * entire pagetable's worth of locks during the
853 * traverse, because we may wrap the preempt count (8
854 * bits). The solution is to mark RO and pin each PTE
855 * page while holding the lock. This means the number
856 * of locks we end up holding is never more than a
857 * batch size (~32 entries, at present).
858 *
859 * If we're not using split pte locks, we needn't pin
860 * the PTE pages independently, because we're
861 * protected by the overall pagetable lock.
862 */
74260714
JF
863 ptl = NULL;
864 if (level == PT_PTE)
eefb47f6 865 ptl = xen_pte_lock(page, mm);
74260714 866
f4f97b3e
JF
867 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
868 pfn_pte(pfn, PAGE_KERNEL_RO),
74260714
JF
869 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
870
11ad93e5 871 if (ptl) {
74260714
JF
872 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
873
74260714
JF
874 /* Queue a deferred unlock for when this batch
875 is completed. */
7708ad64 876 xen_mc_callback(xen_pte_unlock, ptl);
74260714 877 }
f4f97b3e
JF
878 }
879
880 return flush;
881}
3b827c1b 882
f4f97b3e
JF
883/* This is called just after a mm has been created, but it has not
884 been used yet. We need to make sure that its pagetable is all
885 read-only, and can be pinned. */
eefb47f6 886static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
3b827c1b 887{
5f94fb5b
JF
888 trace_xen_mmu_pgd_pin(mm, pgd);
889
f4f97b3e 890 xen_mc_batch();
3b827c1b 891
86bbc2c2 892 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
d05fdf31 893 /* re-enable interrupts for flushing */
f87e4cac 894 xen_mc_issue(0);
d05fdf31 895
f4f97b3e 896 kmap_flush_unused();
d05fdf31 897
f87e4cac
JF
898 xen_mc_batch();
899 }
f4f97b3e 900
d6182fbf
JF
901#ifdef CONFIG_X86_64
902 {
903 pgd_t *user_pgd = xen_get_user_pgd(pgd);
904
905 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
906
907 if (user_pgd) {
eefb47f6 908 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
f63c2f24
T
909 xen_do_pin(MMUEXT_PIN_L4_TABLE,
910 PFN_DOWN(__pa(user_pgd)));
d6182fbf
JF
911 }
912 }
913#else /* CONFIG_X86_32 */
5deb30d1
JF
914#ifdef CONFIG_X86_PAE
915 /* Need to make sure unshared kernel PMD is pinnable */
47cb2ed9 916 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
eefb47f6 917 PT_PMD);
5deb30d1 918#endif
28499143 919 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
d6182fbf 920#endif /* CONFIG_X86_64 */
f4f97b3e 921 xen_mc_issue(0);
3b827c1b
JF
922}
923
eefb47f6
JF
924static void xen_pgd_pin(struct mm_struct *mm)
925{
926 __xen_pgd_pin(mm, mm->pgd);
927}
928
0e91398f
JF
929/*
930 * On save, we need to pin all pagetables to make sure they get their
931 * mfns turned into pfns. Search the list for any unpinned pgds and pin
932 * them (unpinned pgds are not currently in use, probably because the
933 * process is under construction or destruction).
eefb47f6
JF
934 *
935 * Expected to be called in stop_machine() ("equivalent to taking
936 * every spinlock in the system"), so the locking doesn't really
937 * matter all that much.
0e91398f
JF
938 */
939void xen_mm_pin_all(void)
940{
0e91398f 941 struct page *page;
74260714 942
a79e53d8 943 spin_lock(&pgd_lock);
f4f97b3e 944
0e91398f
JF
945 list_for_each_entry(page, &pgd_list, lru) {
946 if (!PagePinned(page)) {
eefb47f6 947 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
0e91398f
JF
948 SetPageSavePinned(page);
949 }
950 }
951
a79e53d8 952 spin_unlock(&pgd_lock);
3b827c1b
JF
953}
954
c1f2f09e
EH
955/*
956 * The init_mm pagetable is really pinned as soon as it's created, but
957 * that's before we have page structures to store the bits. So do all
958 * the book-keeping now.
959 */
3f508953 960static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
eefb47f6 961 enum pt_level level)
3b827c1b 962{
f4f97b3e
JF
963 SetPagePinned(page);
964 return 0;
965}
3b827c1b 966
b96229b5 967static void __init xen_mark_init_mm_pinned(void)
f4f97b3e 968{
eefb47f6 969 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
f4f97b3e 970}
3b827c1b 971
eefb47f6
JF
972static int xen_unpin_page(struct mm_struct *mm, struct page *page,
973 enum pt_level level)
f4f97b3e 974{
d60cd46b 975 unsigned pgfl = TestClearPagePinned(page);
3b827c1b 976
f4f97b3e
JF
977 if (pgfl && !PageHighMem(page)) {
978 void *pt = lowmem_page_address(page);
979 unsigned long pfn = page_to_pfn(page);
74260714
JF
980 spinlock_t *ptl = NULL;
981 struct multicall_space mcs;
982
11ad93e5
JF
983 /*
984 * Do the converse to pin_page. If we're using split
985 * pte locks, we must be holding the lock while
986 * the pte page is unpinned but still RO to prevent
987 * concurrent updates from seeing it in this
988 * partially-pinned state.
989 */
74260714 990 if (level == PT_PTE) {
eefb47f6 991 ptl = xen_pte_lock(page, mm);
74260714 992
11ad93e5
JF
993 if (ptl)
994 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
74260714
JF
995 }
996
997 mcs = __xen_mc_entry(0);
f4f97b3e
JF
998
999 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1000 pfn_pte(pfn, PAGE_KERNEL),
74260714
JF
1001 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1002
1003 if (ptl) {
1004 /* unlock when batch completed */
7708ad64 1005 xen_mc_callback(xen_pte_unlock, ptl);
74260714 1006 }
f4f97b3e
JF
1007 }
1008
1009 return 0; /* never need to flush on unpin */
3b827c1b
JF
1010}
1011
f4f97b3e 1012/* Release a pagetable's pages back as normal RW */
eefb47f6 1013static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
f4f97b3e 1014{
5f94fb5b
JF
1015 trace_xen_mmu_pgd_unpin(mm, pgd);
1016
f4f97b3e
JF
1017 xen_mc_batch();
1018
74260714 1019 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
f4f97b3e 1020
d6182fbf
JF
1021#ifdef CONFIG_X86_64
1022 {
1023 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1024
1025 if (user_pgd) {
f63c2f24
T
1026 xen_do_pin(MMUEXT_UNPIN_TABLE,
1027 PFN_DOWN(__pa(user_pgd)));
eefb47f6 1028 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
d6182fbf
JF
1029 }
1030 }
1031#endif
1032
5deb30d1
JF
1033#ifdef CONFIG_X86_PAE
1034 /* Need to make sure unshared kernel PMD is unpinned */
47cb2ed9 1035 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
eefb47f6 1036 PT_PMD);
5deb30d1 1037#endif
d6182fbf 1038
86bbc2c2 1039 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
f4f97b3e
JF
1040
1041 xen_mc_issue(0);
1042}
3b827c1b 1043
eefb47f6
JF
1044static void xen_pgd_unpin(struct mm_struct *mm)
1045{
1046 __xen_pgd_unpin(mm, mm->pgd);
1047}
1048
0e91398f
JF
1049/*
1050 * On resume, undo any pinning done at save, so that the rest of the
1051 * kernel doesn't see any unexpected pinned pagetables.
1052 */
1053void xen_mm_unpin_all(void)
1054{
0e91398f
JF
1055 struct page *page;
1056
a79e53d8 1057 spin_lock(&pgd_lock);
0e91398f
JF
1058
1059 list_for_each_entry(page, &pgd_list, lru) {
1060 if (PageSavePinned(page)) {
1061 BUG_ON(!PagePinned(page));
eefb47f6 1062 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
0e91398f
JF
1063 ClearPageSavePinned(page);
1064 }
1065 }
1066
a79e53d8 1067 spin_unlock(&pgd_lock);
0e91398f
JF
1068}
1069
4c13629f 1070static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
3b827c1b 1071{
f4f97b3e 1072 spin_lock(&next->page_table_lock);
eefb47f6 1073 xen_pgd_pin(next);
f4f97b3e 1074 spin_unlock(&next->page_table_lock);
3b827c1b
JF
1075}
1076
4c13629f 1077static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
3b827c1b 1078{
f4f97b3e 1079 spin_lock(&mm->page_table_lock);
eefb47f6 1080 xen_pgd_pin(mm);
f4f97b3e 1081 spin_unlock(&mm->page_table_lock);
3b827c1b
JF
1082}
1083
3b827c1b 1084
f87e4cac
JF
1085#ifdef CONFIG_SMP
1086/* Another cpu may still have its %cr3 pointing at the pagetable, so
1087 we need to repoint it somewhere else before we can unpin it. */
1088static void drop_other_mm_ref(void *info)
1089{
1090 struct mm_struct *mm = info;
ce87b3d3 1091 struct mm_struct *active_mm;
3b827c1b 1092
2113f469 1093 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
ce87b3d3 1094
2113f469 1095 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
f87e4cac 1096 leave_mm(smp_processor_id());
9f79991d
JF
1097
1098 /* If this cpu still has a stale cr3 reference, then make sure
1099 it has been flushed. */
2113f469 1100 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
9f79991d 1101 load_cr3(swapper_pg_dir);
f87e4cac 1102}
3b827c1b 1103
7708ad64 1104static void xen_drop_mm_ref(struct mm_struct *mm)
f87e4cac 1105{
e4d98207 1106 cpumask_var_t mask;
9f79991d
JF
1107 unsigned cpu;
1108
f87e4cac
JF
1109 if (current->active_mm == mm) {
1110 if (current->mm == mm)
1111 load_cr3(swapper_pg_dir);
1112 else
1113 leave_mm(smp_processor_id());
9f79991d
JF
1114 }
1115
1116 /* Get the "official" set of cpus referring to our pagetable. */
e4d98207
MT
1117 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1118 for_each_online_cpu(cpu) {
78f1c4d6 1119 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
e4d98207
MT
1120 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1121 continue;
1122 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1123 }
1124 return;
1125 }
78f1c4d6 1126 cpumask_copy(mask, mm_cpumask(mm));
9f79991d
JF
1127
1128 /* It's possible that a vcpu may have a stale reference to our
1129 cr3, because it's in lazy mode and hasn't yet flushed
1130 its set of pending hypercalls. In this case, we can
1131 look at its actual current cr3 value, and force it to flush
1132 if needed. */
1133 for_each_online_cpu(cpu) {
1134 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
e4d98207 1135 cpumask_set_cpu(cpu, mask);
3b827c1b
JF
1136 }
1137
e4d98207
MT
1138 if (!cpumask_empty(mask))
1139 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1140 free_cpumask_var(mask);
f87e4cac
JF
1141}
1142#else
7708ad64 1143static void xen_drop_mm_ref(struct mm_struct *mm)
f87e4cac
JF
1144{
1145 if (current->active_mm == mm)
1146 load_cr3(swapper_pg_dir);
1147}
1148#endif
1149
1150/*
1151 * While a process runs, Xen pins its pagetables, which means that the
1152 * hypervisor forces it to be read-only, and it controls all updates
1153 * to it. This means that all pagetable updates have to go via the
1154 * hypervisor, which is moderately expensive.
1155 *
1156 * Since we're pulling the pagetable down, we switch to use init_mm,
1157 * unpin old process pagetable and mark it all read-write, which
1158 * allows further operations on it to be simple memory accesses.
1159 *
1160 * The only subtle point is that another CPU may still be using the
1161 * pagetable because of lazy tlb flushing. This means we need to
1162 * switch all CPUs off this pagetable before we can unpin it.
1163 */
4c13629f 1164static void xen_exit_mmap(struct mm_struct *mm)
f87e4cac
JF
1165{
1166 get_cpu(); /* make sure we don't move around */
7708ad64 1167 xen_drop_mm_ref(mm);
f87e4cac 1168 put_cpu();
3b827c1b 1169
f120f13e 1170 spin_lock(&mm->page_table_lock);
df912ea4
JF
1171
1172 /* pgd may not be pinned in the error exit path of execve */
7708ad64 1173 if (xen_page_pinned(mm->pgd))
eefb47f6 1174 xen_pgd_unpin(mm);
74260714 1175
f120f13e 1176 spin_unlock(&mm->page_table_lock);
3b827c1b 1177}
994025ca 1178
c7112887
AR
1179static void xen_post_allocator_init(void);
1180
7f914062
KRW
1181#ifdef CONFIG_X86_64
1182static void __init xen_cleanhighmap(unsigned long vaddr,
1183 unsigned long vaddr_end)
1184{
1185 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1186 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1187
1188 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1189 * We include the PMD passed in on _both_ boundaries. */
1190 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1191 pmd++, vaddr += PMD_SIZE) {
1192 if (pmd_none(*pmd))
1193 continue;
1194 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1195 set_pmd(pmd, __pmd(0));
1196 }
1197 /* In case we did something silly, we should crash in this function
1198 * instead of somewhere later where it would be confusing. */
1199 xen_mc_flush();
1200}
1201#endif
98104c34 1202static void __init xen_pagetable_init(void)
319f3ba5 1203{
7f914062
KRW
1204#ifdef CONFIG_X86_64
1205 unsigned long size;
1206 unsigned long addr;
1207#endif
98104c34 1208 paging_init();
319f3ba5 1209 xen_setup_shared_info();
7f914062
KRW
1210#ifdef CONFIG_X86_64
1211 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1212 unsigned long new_mfn_list;
1213
1214 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1215
1216 /* On 32-bit, we get zero so this never gets executed. */
1217 new_mfn_list = xen_revector_p2m_tree();
1218 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
1219 /* using __ka address and sticking INVALID_P2M_ENTRY! */
1220 memset((void *)xen_start_info->mfn_list, 0xff, size);
1221
1222 /* We should be in __ka space. */
1223 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1224 addr = xen_start_info->mfn_list;
7f914062
KRW
1225 /* We round up to the PMD, which means that if anybody at this stage is
1226 * using the __ka address of xen_start_info or xen_start_info->shared_info
1227 * they are going to crash. Fortunately we have already revectored
1228 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1229 size = roundup(size, PMD_SIZE);
1230 xen_cleanhighmap(addr, addr + size);
1231
785f6231 1232 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
7f914062
KRW
1233 memblock_free(__pa(xen_start_info->mfn_list), size);
1234 /* And revector! Bye bye old array */
1235 xen_start_info->mfn_list = new_mfn_list;
32873187
KRW
1236 } else
1237 goto skip;
7f914062 1238 }
3aca7fbc
KRW
1239 /* At this stage, cleanup_highmap has already cleaned __ka space
1240 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1241 * the ramdisk). We continue on, erasing PMD entries that point to page
1242 * tables - do note that they are accessible at this stage via __va.
1243 * For good measure we also round up to the PMD - which means that if
1244 * anybody is using a __ka address for the initial boot stack and
1245 * tries to use it, they are going to crash. The xen_start_info has been
1246 * taken care of already in xen_setup_kernel_pagetable. */
1247 addr = xen_start_info->pt_base;
1248 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1249
1250 xen_cleanhighmap(addr, addr + size);
1251 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1252#ifdef DEBUG
1253 /* This is superfluous and not strictly necessary, but let's do it
1254 * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1255 * anything at this stage. */
1256 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1257#endif
32873187 1258skip:
7f914062 1259#endif
f1d7062a 1260 xen_post_allocator_init();
319f3ba5 1261}
319f3ba5
JF
1262static void xen_write_cr2(unsigned long cr2)
1263{
2113f469 1264 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
319f3ba5
JF
1265}
1266
1267static unsigned long xen_read_cr2(void)
1268{
2113f469 1269 return this_cpu_read(xen_vcpu)->arch.cr2;
319f3ba5
JF
1270}
1271
1272unsigned long xen_read_cr2_direct(void)
1273{
2113f469 1274 return this_cpu_read(xen_vcpu_info.arch.cr2);
319f3ba5
JF
1275}
1276
95a7d768
KRW
1277void xen_flush_tlb_all(void)
1278{
1279 struct mmuext_op *op;
1280 struct multicall_space mcs;
1281
1282 trace_xen_mmu_flush_tlb_all(0);
1283
1284 preempt_disable();
1285
1286 mcs = xen_mc_entry(sizeof(*op));
1287
1288 op = mcs.args;
1289 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1290 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1291
1292 xen_mc_issue(PARAVIRT_LAZY_MMU);
1293
1294 preempt_enable();
1295}
319f3ba5
JF
1296static void xen_flush_tlb(void)
1297{
1298 struct mmuext_op *op;
1299 struct multicall_space mcs;
1300
c8eed171
JF
1301 trace_xen_mmu_flush_tlb(0);
1302
319f3ba5
JF
1303 preempt_disable();
1304
1305 mcs = xen_mc_entry(sizeof(*op));
1306
1307 op = mcs.args;
1308 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1309 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1310
1311 xen_mc_issue(PARAVIRT_LAZY_MMU);
1312
1313 preempt_enable();
1314}
1315
1316static void xen_flush_tlb_single(unsigned long addr)
1317{
1318 struct mmuext_op *op;
1319 struct multicall_space mcs;
1320
c8eed171
JF
1321 trace_xen_mmu_flush_tlb_single(addr);
1322
319f3ba5
JF
1323 preempt_disable();
1324
1325 mcs = xen_mc_entry(sizeof(*op));
1326 op = mcs.args;
1327 op->cmd = MMUEXT_INVLPG_LOCAL;
1328 op->arg1.linear_addr = addr & PAGE_MASK;
1329 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1330
1331 xen_mc_issue(PARAVIRT_LAZY_MMU);
1332
1333 preempt_enable();
1334}
1335
1336static void xen_flush_tlb_others(const struct cpumask *cpus,
e7b52ffd
AS
1337 struct mm_struct *mm, unsigned long start,
1338 unsigned long end)
319f3ba5
JF
1339{
1340 struct {
1341 struct mmuext_op op;
32dd1194 1342#ifdef CONFIG_SMP
900cba88 1343 DECLARE_BITMAP(mask, num_processors);
32dd1194
KRW
1344#else
1345 DECLARE_BITMAP(mask, NR_CPUS);
1346#endif
319f3ba5
JF
1347 } *args;
1348 struct multicall_space mcs;
1349
e7b52ffd 1350 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
c8eed171 1351
e3f8a74e
JF
1352 if (cpumask_empty(cpus))
1353 return; /* nothing to do */
319f3ba5
JF
1354
1355 mcs = xen_mc_entry(sizeof(*args));
1356 args = mcs.args;
1357 args->op.arg2.vcpumask = to_cpumask(args->mask);
1358
1359 /* Remove us, and any offline CPUS. */
1360 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1361 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
319f3ba5 1362
e7b52ffd 1363 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
ce7184bd 1364 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
319f3ba5 1365 args->op.cmd = MMUEXT_INVLPG_MULTI;
e7b52ffd 1366 args->op.arg1.linear_addr = start;
319f3ba5
JF
1367 }
1368
1369 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1370
319f3ba5
JF
1371 xen_mc_issue(PARAVIRT_LAZY_MMU);
1372}
1373
1374static unsigned long xen_read_cr3(void)
1375{
2113f469 1376 return this_cpu_read(xen_cr3);
319f3ba5
JF
1377}
1378
1379static void set_current_cr3(void *v)
1380{
2113f469 1381 this_cpu_write(xen_current_cr3, (unsigned long)v);
319f3ba5
JF
1382}
1383
1384static void __xen_write_cr3(bool kernel, unsigned long cr3)
1385{
dcf7435c 1386 struct mmuext_op op;
319f3ba5
JF
1387 unsigned long mfn;
1388
c8eed171
JF
1389 trace_xen_mmu_write_cr3(kernel, cr3);
1390
319f3ba5
JF
1391 if (cr3)
1392 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1393 else
1394 mfn = 0;
1395
1396 WARN_ON(mfn == 0 && kernel);
1397
dcf7435c
JF
1398 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1399 op.arg1.mfn = mfn;
319f3ba5 1400
dcf7435c 1401 xen_extend_mmuext_op(&op);
319f3ba5
JF
1402
1403 if (kernel) {
2113f469 1404 this_cpu_write(xen_cr3, cr3);
319f3ba5
JF
1405
1406 /* Update xen_current_cr3 once the batch has actually
1407 been submitted. */
1408 xen_mc_callback(set_current_cr3, (void *)cr3);
1409 }
1410}
319f3ba5
JF
1411static void xen_write_cr3(unsigned long cr3)
1412{
1413 BUG_ON(preemptible());
1414
1415 xen_mc_batch(); /* disables interrupts */
1416
1417 /* Update while interrupts are disabled, so it's atomic with
1418 respect to IPIs */
2113f469 1419 this_cpu_write(xen_cr3, cr3);
319f3ba5
JF
1420
1421 __xen_write_cr3(true, cr3);
1422
1423#ifdef CONFIG_X86_64
1424 {
1425 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1426 if (user_pgd)
1427 __xen_write_cr3(false, __pa(user_pgd));
1428 else
1429 __xen_write_cr3(false, 0);
1430 }
1431#endif
1432
1433 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1434}
1435
0cc9129d
KRW
1436#ifdef CONFIG_X86_64
1437/*
1438 * At the start of the day - when Xen launches a guest, it has already
1439 * built pagetables for the guest. We diligently look over them
1440 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1441 * init_level4_pgt and its friends. Then when we are happy we load
1442 * the new init_level4_pgt - and continue on.
1443 *
1444 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1445 * up the rest of the pagetables. When it has completed it loads the cr3.
1446 * N.B. that baremetal would start at 'start_kernel' (and the early
1447 * #PF handler would create bootstrap pagetables) - so we are running
1448 * with the same assumptions as what to do when write_cr3 is executed
1449 * at this point.
1450 *
1451 * Since there are no user pagetables at all, we have two variants
1452 * of xen_write_cr3 - the early bootup (this one), and the late one
1453 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1454 * the Linux kernel and user-space are both in ring 3 while the
1455 * hypervisor is in ring 0.
1456 */
1457static void __init xen_write_cr3_init(unsigned long cr3)
1458{
1459 BUG_ON(preemptible());
1460
1461 xen_mc_batch(); /* disables interrupts */
1462
1463 /* Update while interrupts are disabled, so it's atomic with
1464 respect to IPIs */
1465 this_cpu_write(xen_cr3, cr3);
1466
1467 __xen_write_cr3(true, cr3);
1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
0cc9129d
KRW
1470}
1471#endif
1472
319f3ba5
JF
1473static int xen_pgd_alloc(struct mm_struct *mm)
1474{
1475 pgd_t *pgd = mm->pgd;
1476 int ret = 0;
1477
1478 BUG_ON(PagePinned(virt_to_page(pgd)));
1479
1480#ifdef CONFIG_X86_64
1481 {
1482 struct page *page = virt_to_page(pgd);
1483 pgd_t *user_pgd;
1484
1485 BUG_ON(page->private != 0);
1486
1487 ret = -ENOMEM;
1488
1489 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1490 page->private = (unsigned long)user_pgd;
1491
1492 if (user_pgd != NULL) {
1493 user_pgd[pgd_index(VSYSCALL_START)] =
1494 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1495 ret = 0;
1496 }
1497
1498 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1499 }
1500#endif
1501
1502 return ret;
1503}
1504
1505static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1506{
1507#ifdef CONFIG_X86_64
1508 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1509
1510 if (user_pgd)
1511 free_page((unsigned long)user_pgd);
1512#endif
1513}
1514
ee176455 1515#ifdef CONFIG_X86_32
3f508953 1516static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1f4f9315
JF
1517{
1518 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1519 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1520 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1521 pte_val_ma(pte));
ee176455
SS
1522
1523 return pte;
1524}
1525#else /* CONFIG_X86_64 */
3f508953 1526static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
ee176455 1527{
1f4f9315
JF
1528 return pte;
1529}
ee176455 1530#endif /* CONFIG_X86_64 */
1f4f9315 1531
d095d43e
DV
1532/*
1533 * Init-time set_pte while constructing initial pagetables, which
1534 * doesn't allow RO page table pages to be remapped RW.
1535 *
66a27dde
DV
1536 * If there is no MFN for this PFN then this page is initially
1537 * ballooned out so clear the PTE (as in decrease_reservation() in
1538 * drivers/xen/balloon.c).
1539 *
d095d43e
DV
1540 * Many of these PTE updates are done on unpinned and writable pages
1541 * and doing a hypercall for these is unnecessary and expensive. At
1542 * this point it is not possible to tell if a page is pinned or not,
1543 * so always write the PTE directly and rely on Xen trapping and
1544 * emulating any updates as necessary.
1545 */
3f508953 1546static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1f4f9315 1547{
66a27dde
DV
1548 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1549 pte = mask_rw_pte(ptep, pte);
1550 else
1551 pte = __pte_ma(0);
1f4f9315 1552
d095d43e 1553 native_set_pte(ptep, pte);
1f4f9315 1554}
319f3ba5 1555
b96229b5
JF
1556static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1557{
1558 struct mmuext_op op;
1559 op.cmd = cmd;
1560 op.arg1.mfn = pfn_to_mfn(pfn);
1561 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1562 BUG();
1563}
1564
319f3ba5
JF
1565/* Early in boot, while setting up the initial pagetable, assume
1566 everything is pinned. */
3f508953 1567static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
319f3ba5 1568{
b96229b5
JF
1569#ifdef CONFIG_FLATMEM
1570 BUG_ON(mem_map); /* should only be used early */
1571#endif
1572 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1573 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1574}
1575
1576/* Used for pmd and pud */
3f508953 1577static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
b96229b5 1578{
319f3ba5
JF
1579#ifdef CONFIG_FLATMEM
1580 BUG_ON(mem_map); /* should only be used early */
1581#endif
1582 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1583}
1584
1585/* Early release_pte assumes that all pts are pinned, since there's
1586 only init_mm and anything attached to that is pinned. */
3f508953 1587static void __init xen_release_pte_init(unsigned long pfn)
319f3ba5 1588{
b96229b5 1589 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
319f3ba5
JF
1590 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1591}
1592
3f508953 1593static void __init xen_release_pmd_init(unsigned long pfn)
319f3ba5 1594{
b96229b5 1595 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
319f3ba5
JF
1596}
1597
bc7fe1d9
JF
1598static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1599{
1600 struct multicall_space mcs;
1601 struct mmuext_op *op;
1602
1603 mcs = __xen_mc_entry(sizeof(*op));
1604 op = mcs.args;
1605 op->cmd = cmd;
1606 op->arg1.mfn = pfn_to_mfn(pfn);
1607
1608 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1609}
1610
1611static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1612{
1613 struct multicall_space mcs;
1614 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1615
1616 mcs = __xen_mc_entry(0);
1617 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1618 pfn_pte(pfn, prot), 0);
1619}
1620
319f3ba5
JF
1621/* This needs to make sure the new pte page is pinned iff its being
1622 attached to a pinned pagetable. */
bc7fe1d9
JF
1623static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1624 unsigned level)
319f3ba5 1625{
bc7fe1d9
JF
1626 bool pinned = PagePinned(virt_to_page(mm->pgd));
1627
c2ba050d 1628 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
319f3ba5 1629
c2ba050d 1630 if (pinned) {
bc7fe1d9 1631 struct page *page = pfn_to_page(pfn);
319f3ba5 1632
319f3ba5
JF
1633 SetPagePinned(page);
1634
319f3ba5 1635 if (!PageHighMem(page)) {
bc7fe1d9
JF
1636 xen_mc_batch();
1637
1638 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1639
319f3ba5 1640 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
bc7fe1d9
JF
1641 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1642
1643 xen_mc_issue(PARAVIRT_LAZY_MMU);
319f3ba5
JF
1644 } else {
1645 /* make sure there are no stray mappings of
1646 this page */
1647 kmap_flush_unused();
1648 }
1649 }
1650}
1651
1652static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1653{
1654 xen_alloc_ptpage(mm, pfn, PT_PTE);
1655}
1656
1657static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1658{
1659 xen_alloc_ptpage(mm, pfn, PT_PMD);
1660}
1661
1662/* This should never happen until we're OK to use struct page */
bc7fe1d9 1663static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
319f3ba5
JF
1664{
1665 struct page *page = pfn_to_page(pfn);
c2ba050d 1666 bool pinned = PagePinned(page);
319f3ba5 1667
c2ba050d 1668 trace_xen_mmu_release_ptpage(pfn, level, pinned);
319f3ba5 1669
c2ba050d 1670 if (pinned) {
319f3ba5 1671 if (!PageHighMem(page)) {
bc7fe1d9
JF
1672 xen_mc_batch();
1673
319f3ba5 1674 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
bc7fe1d9
JF
1675 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1676
1677 __set_pfn_prot(pfn, PAGE_KERNEL);
1678
1679 xen_mc_issue(PARAVIRT_LAZY_MMU);
319f3ba5
JF
1680 }
1681 ClearPagePinned(page);
1682 }
1683}
1684
1685static void xen_release_pte(unsigned long pfn)
1686{
1687 xen_release_ptpage(pfn, PT_PTE);
1688}
1689
1690static void xen_release_pmd(unsigned long pfn)
1691{
1692 xen_release_ptpage(pfn, PT_PMD);
1693}
1694
1695#if PAGETABLE_LEVELS == 4
1696static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1697{
1698 xen_alloc_ptpage(mm, pfn, PT_PUD);
1699}
1700
1701static void xen_release_pud(unsigned long pfn)
1702{
1703 xen_release_ptpage(pfn, PT_PUD);
1704}
1705#endif
1706
1707void __init xen_reserve_top(void)
1708{
1709#ifdef CONFIG_X86_32
1710 unsigned long top = HYPERVISOR_VIRT_START;
1711 struct xen_platform_parameters pp;
1712
1713 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1714 top = pp.virt_start;
1715
1716 reserve_top_address(-top);
1717#endif /* CONFIG_X86_32 */
1718}
1719
1720/*
1721 * Like __va(), but returns address in the kernel mapping (which is
1722 * all we have until the physical memory mapping has been set up).
1723 */
1724static void *__ka(phys_addr_t paddr)
1725{
1726#ifdef CONFIG_X86_64
1727 return (void *)(paddr + __START_KERNEL_map);
1728#else
1729 return __va(paddr);
1730#endif
1731}
1732
1733/* Convert a machine address to physical address */
1734static unsigned long m2p(phys_addr_t maddr)
1735{
1736 phys_addr_t paddr;
1737
1738 maddr &= PTE_PFN_MASK;
1739 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1740
1741 return paddr;
1742}
1743
1744/* Convert a machine address to kernel virtual */
1745static void *m2v(phys_addr_t maddr)
1746{
1747 return __ka(m2p(maddr));
1748}
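/*
 * Small worked example (hypothetical numbers): a pmd entry of 0x2345067
 * masks down to machine frame 0x2345; if the M2P table maps mfn 0x2345
 * to pfn 0x1234, m2p() returns physical address 0x1234000 and m2v()
 * returns its kernel-mapping (__ka) alias.
 */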
1749
4ec5387c 1750/* Set the page permissions on identity-mapped pages */
319f3ba5
JF
1751static void set_page_prot(void *addr, pgprot_t prot)
1752{
1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1754 pte_t pte = pfn_pte(pfn, prot);
1755
1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1757 BUG();
1758}
caaf9ecf 1759#ifdef CONFIG_X86_32
3f508953 1760static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
319f3ba5
JF
1761{
1762 unsigned pmdidx, pteidx;
1763 unsigned ident_pte;
1764 unsigned long pfn;
1765
764f0138
JF
1766 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1767 PAGE_SIZE);
1768
319f3ba5
JF
1769 ident_pte = 0;
1770 pfn = 0;
1771 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1772 pte_t *pte_page;
1773
1774 /* Reuse or allocate a page of ptes */
1775 if (pmd_present(pmd[pmdidx]))
1776 pte_page = m2v(pmd[pmdidx].pmd);
1777 else {
1778 /* Check for free pte pages */
764f0138 1779 if (ident_pte == LEVEL1_IDENT_ENTRIES)
319f3ba5
JF
1780 break;
1781
1782 pte_page = &level1_ident_pgt[ident_pte];
1783 ident_pte += PTRS_PER_PTE;
1784
1785 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1786 }
1787
1788 /* Install mappings */
1789 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1790 pte_t pte;
1791
a91d9287
SS
1792#ifdef CONFIG_X86_32
1793 if (pfn > max_pfn_mapped)
1794 max_pfn_mapped = pfn;
1795#endif
1796
319f3ba5
JF
1797 if (!pte_none(pte_page[pteidx]))
1798 continue;
1799
1800 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1801 pte_page[pteidx] = pte;
1802 }
1803 }
1804
1805 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1806 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1807
1808 set_page_prot(pmd, PAGE_KERNEL_RO);
1809}
caaf9ecf 1810#endif
7e77506a
IC
1811void __init xen_setup_machphys_mapping(void)
1812{
1813 struct xen_machphys_mapping mapping;
7e77506a
IC
1814
1815 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1816 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
ccbcdf7c 1817 machine_to_phys_nr = mapping.max_mfn + 1;
7e77506a 1818 } else {
ccbcdf7c 1819 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
7e77506a 1820 }
ccbcdf7c 1821#ifdef CONFIG_X86_32
61cca2fa
JB
1822 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1823 < machine_to_phys_mapping);
ccbcdf7c 1824#endif
7e77506a
IC
1825}
1826
319f3ba5
JF
1827#ifdef CONFIG_X86_64
1828static void convert_pfn_mfn(void *v)
1829{
1830 pte_t *pte = v;
1831 int i;
1832
1833 /* All levels are converted the same way, so just treat them
1834 as ptes. */
1835 for (i = 0; i < PTRS_PER_PTE; i++)
1836 pte[i] = xen_make_pte(pte[i].pte);
1837}
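/*
 * Helper for xen_setup_kernel_pagetable() below: if the page at @addr is
 * the first or last frame of the Xen-provided pagetable range, make it
 * writable again, clear it, and shrink the [pt_base, pt_end) range so
 * that the frame is not reserved later on.
 */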
488f046d
KRW
1838static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1839 unsigned long addr)
1840{
1841 if (*pt_base == PFN_DOWN(__pa(addr))) {
1842 set_page_prot((void *)addr, PAGE_KERNEL);
1843 clear_page((void *)addr);
1844 (*pt_base)++;
1845 }
1846 if (*pt_end == PFN_DOWN(__pa(addr))) {
1847 set_page_prot((void *)addr, PAGE_KERNEL);
1848 clear_page((void *)addr);
1849 (*pt_end)--;
1850 }
1851}
319f3ba5 1852/*
0d2eb44f 1853 * Set up the initial kernel pagetable.
319f3ba5
JF
1854 *
 1855 * We can construct this by grafting the Xen-provided pagetable into
 1856 * head_64.S's preconstructed pagetables. We copy the Xen L2s into
1857 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1858 * means that only the kernel has a physical mapping to start with -
1859 * but that's enough to get __va working. We need to fill in the rest
1860 * of the physical mapping once some sort of allocator has been set
1861 * up.
1862 */
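/*
 * Rough sketch of the resulting layout (indices as in the comments
 * below), for orientation only:
 *
 *	init_level4_pgt[272] -> level3_ident_pgt[0]   -> level2_ident_pgt
 *	init_level4_pgt[511] -> level3_kernel_pgt[510] -> level2_kernel_pgt
 *	                        level3_kernel_pgt[511] -> level2_fixmap_pgt
 *
 * level2_ident_pgt and level2_kernel_pgt end up as copies of the same
 * Xen-provided kernel L2, so the kernel is reachable through both the
 * __va and __ka ranges.
 */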
3699aad0 1863void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
319f3ba5
JF
1864{
1865 pud_t *l3;
1866 pmd_t *l2;
488f046d
KRW
1867 unsigned long addr[3];
1868 unsigned long pt_base, pt_end;
1869 unsigned i;
319f3ba5 1870
14988a4d
SS
 1871 /* max_pfn_mapped is the last pfn mapped in the initial memory
 1872 * mappings. Since on Xen, after the kernel mappings, we also have
 1873 * mappings of some pages that don't exist in pfn space, we set
 1874 * max_pfn_mapped to the last real pfn mapped. */
1875 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1876
488f046d
KRW
1877 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1878 pt_end = pt_base + xen_start_info->nr_pt_frames;
1879
319f3ba5
JF
1880 /* Zap identity mapping */
1881 init_level4_pgt[0] = __pgd(0);
1882
1883 /* Pre-constructed entries are in pfn, so convert to mfn */
4fac153a
KRW
1884 /* L4[272] -> level3_ident_pgt
1885 * L4[511] -> level3_kernel_pgt */
319f3ba5 1886 convert_pfn_mfn(init_level4_pgt);
4fac153a
KRW
1887
1888 /* L3_i[0] -> level2_ident_pgt */
319f3ba5 1889 convert_pfn_mfn(level3_ident_pgt);
4fac153a
KRW
 1890 /* L3_k[510] -> level2_kernel_pgt
 1891 * L3_k[511] -> level2_fixmap_pgt */
319f3ba5
JF
1892 convert_pfn_mfn(level3_kernel_pgt);
1893
4fac153a 1894 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
319f3ba5
JF
1895 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1896 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1897
488f046d
KRW
1898 addr[0] = (unsigned long)pgd;
1899 addr[1] = (unsigned long)l3;
1900 addr[2] = (unsigned long)l2;
4fac153a
KRW
 1901 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
 1902 * both L4[272][0] and L4[511][511] have entries that point to the same
 1903 * L2 (PMD) tables. This means that if you modify it in __va space
 1904 * it will also be modified in the __ka space! (But if you just
 1905 * modify the PMD table to point to other PTEs or to none, then you
 1906 * are OK - which is what cleanup_highmap does.) */
ae895ed7 1907 copy_page(level2_ident_pgt, l2);
4fac153a 1908 /* Graft it onto L4[511][511] */
ae895ed7 1909 copy_page(level2_kernel_pgt, l2);
319f3ba5 1910
4fac153a 1911 /* Get [511][510] and graft that into level2_fixmap_pgt */
319f3ba5
JF
1912 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1913 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
ae895ed7 1914 copy_page(level2_fixmap_pgt, l2);
4fac153a
KRW
 1915 /* Note that we don't do anything with level1_fixmap_pgt;
 1916 * we don't need it. */
319f3ba5
JF
1917
1918 /* Make pagetable pieces RO */
1919 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1920 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1921 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1922 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
caaf9ecf 1923 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
319f3ba5
JF
1924 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1925 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1926
1927 /* Pin down new L4 */
1928 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1929 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1930
1931 /* Unpin Xen-provided one */
1932 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1933
319f3ba5
JF
1934 /*
1935 * At this stage there can be no user pgd, and no page
 1936 * structure to attach it to, so make sure we just set the kernel
 1937 * pgd.
1938 */
1939 xen_mc_batch();
488f046d 1940 __xen_write_cr3(true, __pa(init_level4_pgt));
319f3ba5
JF
1941 xen_mc_issue(PARAVIRT_LAZY_CPU);
1942
488f046d
KRW
 1943 /* We can't easily rip out the L3 and L2, as the Xen pagetables are
 1944 * laid out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
 1945 * the initial domain. For guests started by the toolstack they are in
 1946 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
 1947 * rip out the [L4] (pgd), but for other guests we shave off three pages.
 1948 */
1949 for (i = 0; i < ARRAY_SIZE(addr); i++)
1950 check_pt_base(&pt_base, &pt_end, addr[i]);
319f3ba5 1951
488f046d
KRW
 1952 /* Reserve the (now up to three pages smaller) Xen pagetable that we are still using. */
1953 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
7f914062
KRW
1954 /* Revector the xen_start_info */
1955 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
319f3ba5
JF
1956}
1957#else /* !CONFIG_X86_64 */
5b5c1af1
IC
1958static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1959static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1960
3f508953 1961static void __init xen_write_cr3_init(unsigned long cr3)
5b5c1af1
IC
1962{
1963 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1964
1965 BUG_ON(read_cr3() != __pa(initial_page_table));
1966 BUG_ON(cr3 != __pa(swapper_pg_dir));
1967
1968 /*
1969 * We are switching to swapper_pg_dir for the first time (from
1970 * initial_page_table) and therefore need to mark that page
1971 * read-only and then pin it.
1972 *
1973 * Xen disallows sharing of kernel PMDs for PAE
1974 * guests. Therefore we must copy the kernel PMD from
1975 * initial_page_table into a new kernel PMD to be used in
1976 * swapper_pg_dir.
1977 */
1978 swapper_kernel_pmd =
1979 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
ae895ed7 1980 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
5b5c1af1
IC
1981 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1982 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1983 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1984
1985 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1986 xen_write_cr3(cr3);
1987 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1988
1989 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1990 PFN_DOWN(__pa(initial_page_table)));
1991 set_page_prot(initial_page_table, PAGE_KERNEL);
1992 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1993
1994 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1995}
319f3ba5 1996
3699aad0 1997void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
319f3ba5
JF
1998{
1999 pmd_t *kernel_pmd;
2000
5b5c1af1
IC
2001 initial_kernel_pmd =
2002 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
f0991802 2003
a91d9287
SS
2004 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2005 xen_start_info->nr_pt_frames * PAGE_SIZE +
2006 512*1024);
319f3ba5
JF
2007
2008 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
ae895ed7 2009 copy_page(initial_kernel_pmd, kernel_pmd);
319f3ba5 2010
5b5c1af1 2011 xen_map_identity_early(initial_kernel_pmd, max_pfn);
319f3ba5 2012
ae895ed7 2013 copy_page(initial_page_table, pgd);
5b5c1af1
IC
2014 initial_page_table[KERNEL_PGD_BOUNDARY] =
2015 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
319f3ba5 2016
5b5c1af1
IC
2017 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2018 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
319f3ba5
JF
2019 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2020
2021 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2022
5b5c1af1
IC
2023 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2024 PFN_DOWN(__pa(initial_page_table)));
2025 xen_write_cr3(__pa(initial_page_table));
319f3ba5 2026
24aa0788 2027 memblock_reserve(__pa(xen_start_info->pt_base),
dc6821e0 2028 xen_start_info->nr_pt_frames * PAGE_SIZE);
319f3ba5
JF
2029}
2030#endif /* CONFIG_X86_64 */
2031
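/*
 * A page of 0xff bytes (filled in xen_init_mmu_ops()) used to back
 * fixmap slots - such as the local APIC and IO APIC bases - that must
 * not map real hardware under Xen; see xen_set_fixmap() below.
 */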
98511f35
JF
2032static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2033
3b3809ac 2034static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
319f3ba5
JF
2035{
2036 pte_t pte;
2037
2038 phys >>= PAGE_SHIFT;
2039
2040 switch (idx) {
2041 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2042#ifdef CONFIG_X86_F00F_BUG
2043 case FIX_F00F_IDT:
2044#endif
2045#ifdef CONFIG_X86_32
2046 case FIX_WP_TEST:
2047 case FIX_VDSO:
2048# ifdef CONFIG_HIGHMEM
2049 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2050# endif
2051#else
2052 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
5d5791af 2053 case VVAR_PAGE:
319f3ba5 2054#endif
3ecb1b7d
JF
2055 case FIX_TEXT_POKE0:
2056 case FIX_TEXT_POKE1:
2057 /* All local page mappings */
319f3ba5
JF
2058 pte = pfn_pte(phys, prot);
2059 break;
2060
98511f35
JF
2061#ifdef CONFIG_X86_LOCAL_APIC
2062 case FIX_APIC_BASE: /* maps dummy local APIC */
2063 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2064 break;
2065#endif
2066
2067#ifdef CONFIG_X86_IO_APIC
2068 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2069 /*
2070 * We just don't map the IO APIC - all access is via
 2071 * hypercalls; a dummy page is mapped in its place.
2072 */
27abd14b 2073 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
98511f35
JF
2074 break;
2075#endif
2076
c0011dbf
JF
2077 case FIX_PARAVIRT_BOOTMAP:
2078 /* This is an MFN, but it isn't an IO mapping from the
2079 IO domain */
319f3ba5
JF
2080 pte = mfn_pte(phys, prot);
2081 break;
c0011dbf
JF
2082
2083 default:
2084 /* By default, set_fixmap is used for hardware mappings */
2085 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2086 break;
319f3ba5
JF
2087 }
2088
2089 __native_set_fixmap(idx, pte);
2090
2091#ifdef CONFIG_X86_64
2092 /* Replicate changes to map the vsyscall page into the user
2093 pagetable vsyscall mapping. */
5d5791af
AL
2094 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2095 idx == VVAR_PAGE) {
319f3ba5
JF
2096 unsigned long vaddr = __fix_to_virt(idx);
2097 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2098 }
2099#endif
2100}
2101
3f508953 2102static void __init xen_post_allocator_init(void)
319f3ba5
JF
2103{
2104 pv_mmu_ops.set_pte = xen_set_pte;
2105 pv_mmu_ops.set_pmd = xen_set_pmd;
2106 pv_mmu_ops.set_pud = xen_set_pud;
2107#if PAGETABLE_LEVELS == 4
2108 pv_mmu_ops.set_pgd = xen_set_pgd;
2109#endif
2110
2111 /* This will work as long as patching hasn't happened yet
2112 (which it hasn't) */
2113 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2114 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2115 pv_mmu_ops.release_pte = xen_release_pte;
2116 pv_mmu_ops.release_pmd = xen_release_pmd;
2117#if PAGETABLE_LEVELS == 4
2118 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2119 pv_mmu_ops.release_pud = xen_release_pud;
2120#endif
2121
2122#ifdef CONFIG_X86_64
d3eb2c89 2123 pv_mmu_ops.write_cr3 = &xen_write_cr3;
319f3ba5
JF
2124 SetPagePinned(virt_to_page(level3_user_vsyscall));
2125#endif
2126 xen_mark_init_mm_pinned();
2127}
2128
b407fc57
JF
2129static void xen_leave_lazy_mmu(void)
2130{
5caecb94 2131 preempt_disable();
b407fc57
JF
2132 xen_mc_flush();
2133 paravirt_leave_lazy_mmu();
5caecb94 2134 preempt_enable();
b407fc57 2135}
319f3ba5 2136
3f508953 2137static const struct pv_mmu_ops xen_mmu_ops __initconst = {
319f3ba5
JF
2138 .read_cr2 = xen_read_cr2,
2139 .write_cr2 = xen_write_cr2,
2140
2141 .read_cr3 = xen_read_cr3,
5b5c1af1 2142 .write_cr3 = xen_write_cr3_init,
319f3ba5
JF
2143
2144 .flush_tlb_user = xen_flush_tlb,
2145 .flush_tlb_kernel = xen_flush_tlb,
2146 .flush_tlb_single = xen_flush_tlb_single,
2147 .flush_tlb_others = xen_flush_tlb_others,
2148
2149 .pte_update = paravirt_nop,
2150 .pte_update_defer = paravirt_nop,
2151
2152 .pgd_alloc = xen_pgd_alloc,
2153 .pgd_free = xen_pgd_free,
2154
2155 .alloc_pte = xen_alloc_pte_init,
2156 .release_pte = xen_release_pte_init,
b96229b5 2157 .alloc_pmd = xen_alloc_pmd_init,
b96229b5 2158 .release_pmd = xen_release_pmd_init,
319f3ba5 2159
319f3ba5 2160 .set_pte = xen_set_pte_init,
319f3ba5
JF
2161 .set_pte_at = xen_set_pte_at,
2162 .set_pmd = xen_set_pmd_hyper,
2163
2164 .ptep_modify_prot_start = __ptep_modify_prot_start,
2165 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2166
da5de7c2
JF
2167 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2168 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
319f3ba5 2169
da5de7c2
JF
2170 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2171 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
319f3ba5
JF
2172
2173#ifdef CONFIG_X86_PAE
2174 .set_pte_atomic = xen_set_pte_atomic,
319f3ba5
JF
2175 .pte_clear = xen_pte_clear,
2176 .pmd_clear = xen_pmd_clear,
2177#endif /* CONFIG_X86_PAE */
2178 .set_pud = xen_set_pud_hyper,
2179
da5de7c2
JF
2180 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2181 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
319f3ba5
JF
2182
2183#if PAGETABLE_LEVELS == 4
da5de7c2
JF
2184 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2185 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
319f3ba5
JF
2186 .set_pgd = xen_set_pgd_hyper,
2187
b96229b5
JF
2188 .alloc_pud = xen_alloc_pmd_init,
2189 .release_pud = xen_release_pmd_init,
319f3ba5
JF
2190#endif /* PAGETABLE_LEVELS == 4 */
2191
2192 .activate_mm = xen_activate_mm,
2193 .dup_mmap = xen_dup_mmap,
2194 .exit_mmap = xen_exit_mmap,
2195
2196 .lazy_mode = {
2197 .enter = paravirt_enter_lazy_mmu,
b407fc57 2198 .leave = xen_leave_lazy_mmu,
319f3ba5
JF
2199 },
2200
2201 .set_fixmap = xen_set_fixmap,
2202};
2203
030cb6c0
TG
2204void __init xen_init_mmu_ops(void)
2205{
7737b215 2206 x86_init.paging.pagetable_init = xen_pagetable_init;
030cb6c0 2207 pv_mmu_ops = xen_mmu_ops;
d2cb2145 2208
98511f35 2209 memset(dummy_mapping, 0xff, PAGE_SIZE);
030cb6c0 2210}
319f3ba5 2211
08bbc9da
AN
2212/* Protected by xen_reservation_lock. */
2213#define MAX_CONTIG_ORDER 9 /* 2MB */
2214static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2215
2216#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2217static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2218 unsigned long *in_frames,
2219 unsigned long *out_frames)
2220{
2221 int i;
2222 struct multicall_space mcs;
2223
2224 xen_mc_batch();
2225 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2226 mcs = __xen_mc_entry(0);
2227
2228 if (in_frames)
2229 in_frames[i] = virt_to_mfn(vaddr);
2230
2231 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
6eaa412f 2232 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
08bbc9da
AN
2233
2234 if (out_frames)
2235 out_frames[i] = virt_to_pfn(vaddr);
2236 }
2237 xen_mc_issue(0);
2238}
2239
2240/*
2241 * Update the pfn-to-mfn mappings for a virtual address range, either to
2242 * point to an array of mfns, or contiguously from a single starting
2243 * mfn.
2244 */
2245static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2246 unsigned long *mfns,
2247 unsigned long first_mfn)
2248{
2249 unsigned i, limit;
2250 unsigned long mfn;
2251
2252 xen_mc_batch();
2253
2254 limit = 1u << order;
2255 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2256 struct multicall_space mcs;
2257 unsigned flags;
2258
2259 mcs = __xen_mc_entry(0);
2260 if (mfns)
2261 mfn = mfns[i];
2262 else
2263 mfn = first_mfn + i;
2264
2265 if (i < (limit - 1))
2266 flags = 0;
2267 else {
2268 if (order == 0)
2269 flags = UVMF_INVLPG | UVMF_ALL;
2270 else
2271 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2272 }
2273
2274 MULTI_update_va_mapping(mcs.mc, vaddr,
2275 mfn_pte(mfn, PAGE_KERNEL), flags);
2276
2277 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2278 }
2279
2280 xen_mc_issue(0);
2281}
2282
2283/*
2284 * Perform the hypercall to exchange a region of our pfns to point to
2285 * memory with the required contiguous alignment. Takes the pfns as
2286 * input, and populates mfns as output.
2287 *
2288 * Returns a success code indicating whether the hypervisor was able to
2289 * satisfy the request or not.
2290 */
2291static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2292 unsigned long *pfns_in,
2293 unsigned long extents_out,
2294 unsigned int order_out,
2295 unsigned long *mfns_out,
2296 unsigned int address_bits)
2297{
2298 long rc;
2299 int success;
2300
2301 struct xen_memory_exchange exchange = {
2302 .in = {
2303 .nr_extents = extents_in,
2304 .extent_order = order_in,
2305 .extent_start = pfns_in,
2306 .domid = DOMID_SELF
2307 },
2308 .out = {
2309 .nr_extents = extents_out,
2310 .extent_order = order_out,
2311 .extent_start = mfns_out,
2312 .address_bits = address_bits,
2313 .domid = DOMID_SELF
2314 }
2315 };
2316
2317 BUG_ON(extents_in << order_in != extents_out << order_out);
2318
2319 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2320 success = (exchange.nr_exchanged == extents_in);
2321
2322 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2323 BUG_ON(success && (rc != 0));
2324
2325 return success;
2326}
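/*
 * Worked example (illustration only): xen_create_contiguous_region()
 * below exchanges 1UL << order single-page extents for one order-sized
 * extent, so the invariant checked above holds as
 * (1UL << order) << 0 == 1 << order.
 */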
2327
2328int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2329 unsigned int address_bits)
2330{
2331 unsigned long *in_frames = discontig_frames, out_frame;
2332 unsigned long flags;
2333 int success;
2334
2335 /*
2336 * Currently an auto-translated guest will not perform I/O, nor will
2337 * it require PAE page directories below 4GB. Therefore any calls to
2338 * this function are redundant and can be ignored.
2339 */
2340
2341 if (xen_feature(XENFEAT_auto_translated_physmap))
2342 return 0;
2343
2344 if (unlikely(order > MAX_CONTIG_ORDER))
2345 return -ENOMEM;
2346
2347 memset((void *) vstart, 0, PAGE_SIZE << order);
2348
08bbc9da
AN
2349 spin_lock_irqsave(&xen_reservation_lock, flags);
2350
2351 /* 1. Zap current PTEs, remembering MFNs. */
2352 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2353
2354 /* 2. Get a new contiguous memory extent. */
2355 out_frame = virt_to_pfn(vstart);
2356 success = xen_exchange_memory(1UL << order, 0, in_frames,
2357 1, order, &out_frame,
2358 address_bits);
2359
2360 /* 3. Map the new extent in place of old pages. */
2361 if (success)
2362 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2363 else
2364 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2365
2366 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2367
2368 return success ? 0 : -ENOMEM;
2369}
2370EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
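/*
 * Hypothetical usage sketch (an illustration, not code from this file):
 * a caller needing machine-contiguous memory below a given address
 * width could do roughly the following with an order-`order' block of
 * guest pages:
 *
 *	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);
 *
 *	if (vstart && xen_create_contiguous_region(vstart, order, 32) != 0)
 *		free_pages(vstart, order);	(exchange failed, fall back)
 *
 * In-tree users such as swiotlb-xen wrap this in their own buffer
 * management rather than open-coding it like this.
 */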
2371
2372void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2373{
2374 unsigned long *out_frames = discontig_frames, in_frame;
2375 unsigned long flags;
2376 int success;
2377
2378 if (xen_feature(XENFEAT_auto_translated_physmap))
2379 return;
2380
2381 if (unlikely(order > MAX_CONTIG_ORDER))
2382 return;
2383
2384 memset((void *) vstart, 0, PAGE_SIZE << order);
2385
08bbc9da
AN
2386 spin_lock_irqsave(&xen_reservation_lock, flags);
2387
2388 /* 1. Find start MFN of contiguous extent. */
2389 in_frame = virt_to_mfn(vstart);
2390
2391 /* 2. Zap current PTEs. */
2392 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2393
2394 /* 3. Do the exchange for non-contiguous MFNs. */
2395 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2396 0, out_frames, 0);
2397
2398 /* 4. Map new pages in place of old pages. */
2399 if (success)
2400 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2401 else
2402 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2403
2404 spin_unlock_irqrestore(&xen_reservation_lock, flags);
030cb6c0 2405}
08bbc9da 2406EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
319f3ba5 2407
ca65f9fc 2408#ifdef CONFIG_XEN_PVHVM
34b6f01a
OH
2409#ifdef CONFIG_PROC_VMCORE
2410/*
2411 * This function is used in two contexts:
2412 * - the kdump kernel has to check whether a pfn of the crashed kernel
2413 * was a ballooned page. vmcore is using this function to decide
2414 * whether to access a pfn of the crashed kernel.
2415 * - the kexec kernel has to check whether a pfn was ballooned by the
2416 * previous kernel. If the pfn is ballooned, handle it properly.
 2417 * Returns 0 if the pfn is not backed by a RAM page; the caller may
 2418 * handle the pfn specially in this case.
2419 */
2420static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2421{
2422 struct xen_hvm_get_mem_type a = {
2423 .domid = DOMID_SELF,
2424 .pfn = pfn,
2425 };
2426 int ram;
2427
2428 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2429 return -ENXIO;
2430
2431 switch (a.mem_type) {
2432 case HVMMEM_mmio_dm:
2433 ram = 0;
2434 break;
2435 case HVMMEM_ram_rw:
2436 case HVMMEM_ram_ro:
2437 default:
2438 ram = 1;
2439 break;
2440 }
2441
2442 return ram;
2443}
2444#endif
2445
59151001
SS
2446static void xen_hvm_exit_mmap(struct mm_struct *mm)
2447{
2448 struct xen_hvm_pagetable_dying a;
2449 int rc;
2450
2451 a.domid = DOMID_SELF;
2452 a.gpa = __pa(mm->pgd);
2453 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2454 WARN_ON_ONCE(rc < 0);
2455}
2456
2457static int is_pagetable_dying_supported(void)
2458{
2459 struct xen_hvm_pagetable_dying a;
2460 int rc = 0;
2461
2462 a.domid = DOMID_SELF;
2463 a.gpa = 0x00;
2464 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2465 if (rc < 0) {
2466 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2467 return 0;
2468 }
2469 return 1;
2470}
2471
2472void __init xen_hvm_init_mmu_ops(void)
2473{
2474 if (is_pagetable_dying_supported())
2475 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
34b6f01a
OH
2476#ifdef CONFIG_PROC_VMCORE
2477 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2478#endif
59151001 2479}
ca65f9fc 2480#endif
59151001 2481
de1ef206
IC
2482#define REMAP_BATCH_SIZE 16
2483
2484struct remap_data {
2485 unsigned long mfn;
2486 pgprot_t prot;
2487 struct mmu_update *mmu_update;
2488};
2489
2490static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2491 unsigned long addr, void *data)
2492{
2493 struct remap_data *rmd = data;
2494 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2495
d5108316 2496 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
de1ef206
IC
2497 rmd->mmu_update->val = pte_val_ma(pte);
2498 rmd->mmu_update++;
2499
2500 return 0;
2501}
2502
2503int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2504 unsigned long addr,
7892f692 2505 xen_pfn_t mfn, int nr,
9a032e39
IC
2506 pgprot_t prot, unsigned domid,
2507 struct page **pages)
2508
de1ef206
IC
2509{
2510 struct remap_data rmd;
2511 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2512 int batch;
2513 unsigned long range;
2514 int err = 0;
2515
1a1d4331
SS
2516 if (xen_feature(XENFEAT_auto_translated_physmap))
2517 return -EINVAL;
2518
de1ef206
IC
2519 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2520
314e51b9 2521 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
de1ef206
IC
2522
2523 rmd.mfn = mfn;
2524 rmd.prot = prot;
2525
2526 while (nr) {
2527 batch = min(REMAP_BATCH_SIZE, nr);
2528 range = (unsigned long)batch << PAGE_SHIFT;
2529
2530 rmd.mmu_update = mmu_update;
2531 err = apply_to_page_range(vma->vm_mm, addr, range,
2532 remap_area_mfn_pte_fn, &rmd);
2533 if (err)
2534 goto out;
2535
69870a84
DV
2536 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2537 if (err < 0)
de1ef206
IC
2538 goto out;
2539
2540 nr -= batch;
2541 addr += range;
2542 }
2543
2544 err = 0;
2545out:
2546
95a7d768 2547 xen_flush_tlb_all();
de1ef206
IC
2548
2549 return err;
2550}
2551EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
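/*
 * At the time of writing the main in-tree caller of
 * xen_remap_domain_mfn_range() is the privcmd driver, which uses it to
 * map foreign-domain frames into a userspace VMA on behalf of the
 * toolstack (hence the VM_PFNMAP | VM_IO requirement above).
 */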
9a032e39
IC
2552
 2553/* Returns: 0 on success */
2554int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2555 int numpgs, struct page **pages)
2556{
2557 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2558 return 0;
2559
2560 return -EINVAL;
2561}
2562EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);