We really want to avoid manually handling protection for nested
virtualization. By shadowing pages with the protection the guest asked us
for, the SIE can handle most protection-related actions for us (e.g.
special handling for MVPG) and we can directly forward protection
exceptions to the guest.
PTEs will now always be shadowed with the correct _PAGE_PROTECT flag.
Unshadowing will take care of any guest changes to the parent PTE and
any host changes to the host PTE. If the host PTE doesn't provide the
required access rights or is not available, we have to fix it up.
Acked-by: Martin Schwidefsky <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
Signed-off-by: Christian Borntraeger <[email protected]>
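
For illustration, a minimal userspace sketch of the new rule (not kernel
code; only the _PAGE_PROTECT bit value is borrowed from the s390 headers,
everything else is a stand-in): the shadow PTE takes its frame address from
the host PTE and its protect bit from the PTE the guest handed us, so the
SIE observes exactly the protection the guest asked for.

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_4K	(~0xfffULL)
#define _PAGE_PROTECT	0x200ULL	/* s390 DAT-protection (read-only) bit */

/* frame address from the host PTE, protect bit from the guest's PTE */
static uint64_t make_shadow_pte(uint64_t host_pte, uint64_t guest_pte)
{
	return (host_pte & PAGE_MASK_4K) | (guest_pte & _PAGE_PROTECT);
}

int main(void)
{
	/* guest asked for a read-only mapping: the shadow PTE is protected */
	uint64_t spte = make_shadow_pte(0x5000, 0x1000 | _PAGE_PROTECT);

	printf("shadow pte %#llx, protected: %d\n",
	       (unsigned long long)spte, !!(spte & _PAGE_PROTECT));
	return 0;
}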
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt);
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
unsigned long *pgt, int *dat_protection);
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
- unsigned long paddr, int write);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
void gmap_register_pte_notifier(struct gmap_notifier *);
void gmap_unregister_pte_notifier(struct gmap_notifier *);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
- pte_t *sptep, pte_t *tptep, int write);
+ pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
dat_protection |= pte.p;
if (write && dat_protection)
return PGM_PROTECTION;
- rc = gmap_shadow_page(sg, saddr, pte.pfra * 4096, write);
+ pte.p |= dat_protection;
+ rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
if (rc)
return rc;
return 0;
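
Why the protect bit gets folded before the call: DAT protection may stem
from any translation level (region or segment tables), not just the final
PTE, and the shadow must end up read-only either way. A minimal sketch,
assuming only the s390 _PAGE_PROTECT mask (plain C; the kernel's
page_table_entry bitfield union is simplified away):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PROTECT	0x200ULL	/* s390 PTE DAT-protection bit */

/* dat_protection accumulates the protect bits of all higher translation
 * levels; folding it into the final PTE keeps the shadow read-only even
 * if only an upper-level table was protected */
static uint64_t fold_protection(uint64_t pte, int dat_protection)
{
	return dat_protection ? (pte | _PAGE_PROTECT) : pte;
}

int main(void)
{
	/* the PTE itself is r/w, but a segment table above was protected */
	uint64_t pte = fold_protection(0x1000, 1);

	printf("protect bit set: %d\n", !!(pte & _PAGE_PROTECT));
	return 0;
}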
/**
 * gmap_shadow_page - create a shadow page mapping
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
- * @paddr: parent gmap address to get mapped at @saddr
- * @write: =1 map r/w, =0 map r/o
+ * @pte: pte in parent gmap address space to get shadowed
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
* Called with sg->mm->mmap_sem in read.
*/
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
- unsigned long paddr, int write)
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
struct gmap *parent;
struct gmap_rmap *rmap;
- unsigned long vmaddr;
+ unsigned long vmaddr, paddr;
spinlock_t *ptl;
pte_t *sptep, *tptep;
int rc;
rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
while (1) {
+ paddr = pte_val(pte) & PAGE_MASK;
vmaddr = __gmap_translate(parent, paddr);
if (IS_ERR_VALUE(vmaddr)) {
rc = vmaddr;
radix_tree_preload_end();
break;
}
- rc = ptep_shadow_pte(sg->mm, saddr,
- sptep, tptep, write);
+ rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
if (rc > 0) {
/* Success and a new mapping */
gmap_insert_rmap(sg, vmaddr, rmap);
}
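
The loop above now derives the parent address from the passed PTE on each
iteration and retries until the host mapping cooperates. A hedged userspace
model of that control flow (every helper below is a stand-in invented for
this sketch, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define PAGE_MASK_4K	(~0xfffULL)
#define _PAGE_INVALID	0x400ULL

/* stand-in for the parent gmap translation: parent address -> host
 * address; an identity mapping is good enough for this demo */
static uint64_t translate_stub(uint64_t paddr)
{
	return paddr;
}

/* stand-in for the shadowing attempt: -EAGAIN while the host PTE is
 * still invalid, 1 ("new mapping") once it has been faulted in */
static int try_shadow_stub(uint64_t host_pte)
{
	return (host_pte & _PAGE_INVALID) ? -EAGAIN : 1;
}

int main(void)
{
	uint64_t pte = 0x12345000ULL;		/* parent PTE to shadow */
	uint64_t host_pte = _PAGE_INVALID;	/* not faulted in yet */
	int rc;

	for (;;) {
		/* the parent address is derived from the passed PTE */
		uint64_t paddr = pte & PAGE_MASK_4K;
		uint64_t vmaddr = translate_stub(paddr);

		rc = try_shadow_stub(host_pte);
		if (rc != -EAGAIN)
			break;
		/* the kernel would fix up the host mapping and retry;
		 * here we simply mark the host PTE valid */
		host_pte = vmaddr;
	}
	printf("rc=%d (1 == new shadow mapping created)\n", rc);
	return 0;
}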
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
- pte_t *sptep, pte_t *tptep, int write)
+ pte_t *sptep, pte_t *tptep, pte_t pte)
{
pgste_t spgste, tpgste;
pte_t spte, tpte;
int rc = -EAGAIN;
+ if (!(pte_val(*tptep) & _PAGE_INVALID))
+ return 0; /* already shadowed */
spgste = pgste_get_lock(sptep);
spte = *sptep;
if (!(pte_val(spte) & _PAGE_INVALID) &&
- !(pte_val(spte) & _PAGE_PROTECT)) {
- rc = 0;
- if (!(pte_val(*tptep) & _PAGE_INVALID))
- /* Update existing mapping */
- ptep_flush_direct(mm, saddr, tptep);
- else
- rc = 1;
+ !((pte_val(spte) & _PAGE_PROTECT) &&
+ !(pte_val(pte) & _PAGE_PROTECT))) {
pgste_val(spgste) |= PGSTE_VSIE_BIT;
tpgste = pgste_get_lock(tptep);
pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
- (write ? 0 : _PAGE_PROTECT);
+ (pte_val(pte) & _PAGE_PROTECT);
/* don't touch the storage key - it belongs to parent pgste */
tpgste = pgste_set_pte(tptep, tpgste, tpte);
pgste_set_unlock(tptep, tpgste);
+ rc = 1;
}
pgste_set_unlock(sptep, spgste);
return rc;
}
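
To make the new acceptance condition easier to audit, a small userspace
model enumerating the cases (the masks carry s390's bit values;
can_shadow() is a name invented for this sketch): shadowing proceeds unless
the host PTE is invalid, or the host is read-only while the guest wants the
page writable; those two cases need a host-side fixup first.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_INVALID	0x400ULL
#define _PAGE_PROTECT	0x200ULL

/* mirrors the condition in ptep_shadow_pte(): the host PTE (spte) must
 * be valid, and may only be protected if the guest PTE (pte) is
 * protected as well */
static int can_shadow(uint64_t spte, uint64_t pte)
{
	return !(spte & _PAGE_INVALID) &&
	       !((spte & _PAGE_PROTECT) && !(pte & _PAGE_PROTECT));
}

int main(void)
{
	static const struct { uint64_t spte, pte; const char *desc; } c[] = {
		{ 0x1000, 0x2000, "host r/w, guest r/w" },
		{ 0x1000, 0x2000 | _PAGE_PROTECT, "host r/w, guest r/o" },
		{ 0x1000 | _PAGE_PROTECT, 0x2000 | _PAGE_PROTECT,
		  "host r/o, guest r/o" },
		{ 0x1000 | _PAGE_PROTECT, 0x2000, "host r/o, guest r/w" },
		{ _PAGE_INVALID, 0x2000, "host invalid" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(c) / sizeof(c[0]); i++)
		printf("%-20s -> %s\n", c[i].desc,
		       can_shadow(c[i].spte, c[i].pte) ?
		       "shadow" : "fixup needed (-EAGAIN)");
	return 0;
}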