// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <[email protected]>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver is used to manage
 * the movement of guest pages between the normal memory managed by the
 * hypervisor (HV) and the secure memory managed by the Ultravisor (UV).
 *
 * Page-in and page-out requests from UV come to HV as hcalls, and
 * HV calls back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory, and mappings to shared memory exist in both
 * UV and HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and the guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * the migrate_vma routines and the page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly, it serializes the
 * fault path, as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, the number of page-outs caused by
 * HV touching secure pages is currently very low. If and when UV supports
 * overcommitting, we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *			     as sync-points for page-in/out
 */

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV splits and
 * remaps the 2MB page if required and issues H_SVM_PAGE_IN with 64K page size.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page sizes, this assumption will need to be revisited.
 */

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

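/*
 * KVMPPC_UVMEM_PFN set in a kvmppc_uvmem_slot->pfns[] entry means that the
 * corresponding GFN is currently backed by a secure (device) PFN; the low
 * bits of the entry hold that device PFN.
 */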
#define KVMPPC_UVMEM_PFN (1UL << 63)

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

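/*
 * Per-page private data, kept in the zone_device_data field of each device
 * page that currently stands in for a secure guest page.
 */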
struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
};

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

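/*
 * The lookup helpers below walk kvm->arch.uvmem_pfns and are called with
 * kvm->arch.uvmem_lock held.
 */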
static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
				    struct kvm *kvm)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
			return;
		}
	}
}

static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			p->pfns[gfn - p->base_pfn] = 0;
			return;
		}
	}
}

static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     ~KVMPPC_UVMEM_PFN;
				return true;
			} else {
				return false;
			}
		}
	}
	return false;
}

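/*
 * H_SVM_INIT_START: Begin the guest's transition to secure mode by
 * registering all of its memslots with UV.
 */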
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = H_SUCCESS;
	int srcu_idx;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		if (kvmppc_uvmem_slot_init(kvm, memslot)) {
			ret = H_PARAMETER;
			goto out;
		}
		ret = uv_register_mem_slot(kvm->arch.lpid,
					   memslot->base_gfn << PAGE_SHIFT,
					   memslot->npages * PAGE_SIZE,
					   0, memslot->id);
		if (ret < 0) {
			kvmppc_uvmem_slot_free(kvm, memslot);
			ret = H_PARAMETER;
			goto out;
		}
	}
	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

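/*
 * H_SVM_INIT_DONE: Mark the guest's transition to secure mode as complete.
 */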
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);
	return H_SUCCESS;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). The
 * device PFN is used to keep track of the secure page on the HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

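	/*
	 * The device page is handed back locked and with a reference held;
	 * the caller marks it MIGRATE_PFN_LOCKED when setting up the
	 * destination entry for migrate_vma_pages().
	 */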
	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	get_page(dpage);
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Allocate a PFN from the private device memory pool and copy the page
 * from normal memory to secure memory using the UV_PAGE_IN uvcall.
 */
static int
kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, unsigned long gpa, struct kvm *kvm,
		   unsigned long page_shift, bool *downgrade)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;

	/*
	 * We come here with the mmap_sem write lock held just for
	 * ksm_madvise(); otherwise we only need the read lock.
	 * Hence downgrade to the read lock once ksm_madvise() is done.
	 */
	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_UNMERGEABLE, &vma->vm_flags);
	downgrade_write(&kvm->mm->mmap_sem);
	*downgrade = true;
	if (ret)
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

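	/* The source page cannot be migrated right now; fail the page-in. */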
	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	pfn = *mig.src >> MIGRATE_PFN_SHIFT;
	spage = migrate_pfn_to_page(*mig.src);
	if (spage)
		uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			   page_shift);

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, the dev_pagemap_ops.migrate_to_ram handler is used
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long
kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
	}

retry:
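	/*
	 * kvm->arch.uvmem_lock must be dropped across gfn_to_pfn():
	 * faulting the page in can page out a secure page, which takes
	 * this same lock. Re-check for a racing page-in afterwards.
	 */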
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
		ret = H_SUCCESS;
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the
 * same memory is visible to both UV and HV.
 */
unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		     unsigned long flags, unsigned long page_shift)
{
	bool downgrade = false;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	down_write(&kvm->mm->mmap_sem);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
				&downgrade))
		ret = H_SUCCESS;
out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	if (downgrade)
		up_read(&kvm->mm->mmap_sem);
	else
		up_write(&kvm->mm->mmap_sem);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Provision a new page on the HV side and copy over the contents
 * from secure memory using the UV_PAGE_OUT uvcall.
 */
static int
kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
		    unsigned long end, unsigned long page_shift,
		    struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		goto out;

	ret = migrate_vma_setup(&mig);
	if (ret)
		goto out;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

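	/* Only device (secure) pages have anything to page out. */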
	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to a shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip the page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS) {
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	} else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
out:
	mutex_unlock(&kvm->arch.uvmem_lock);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing a UV_PAGE_OUT uvcall.
 *
 * This eventually results in the device PFN being dropped, and the newly
 * provisioned page/PFN gets populated in QEMU's page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure page becomes a normal page during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	down_read(&kvm->mm->mmap_sem);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	up_read(&kvm->mm->mmap_sem);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

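	/*
	 * Each "secure-memory-ranges" entry is 4 cells: a 2-cell address
	 * followed by a 2-cell size. Sum up the sizes of all ranges.
	 */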
	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such a PEF-disabled platform.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.res = *res;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

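	/*
	 * One bit per device PFN; kvmppc_uvmem_get_page() allocates free
	 * device PFNs from this bitmap.
	 */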
	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.res.start,
			   resource_size(&kvmppc_uvmem_pgmap.res));
	kfree(kvmppc_uvmem_bitmap);
}