ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = 0;
}
+
+ if (cur_gfn < ms->base_gfn)
+ ofs = 0;
+
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
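The hunk above guards the bit offset into the slot's dirty bitmap. A minimal sketch of the logic, assuming the surrounding function is kvm_s390_next_dirty_cmma(): ofs is derived from cur_gfn - ms->base_gfn, and when the scan advances to a memslot whose base_gfn lies above cur_gfn, that subtraction would wrap, so the offset must restart at bit 0 of the new slot:

    /* Sketch, not the literal upstream code. */
    if (cur_gfn < ms->base_gfn)
        ofs = 0;                        /* slot starts above cur_gfn */
    else
        ofs = cur_gfn - ms->base_gfn;   /* resume inside this slot */
    ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);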
vcpu->run->s.regs.fpc = 0;
/*
* Do not reset these registers in the protected case, as some of
- * them are overlayed and they are not accessible in this case
+ * them are overlaid and they are not accessible in this case
* anyway.
*/
if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));
return 0;
}
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
- bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+ bitmap_and(apcb_s, apcb_s, apcb_h,
+ BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));
return 0;
}
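Both hunks above fix the same class of bug: bitmap_and()'s final argument counts bits, not bytes, so passing a bare sizeof() masked only one-eighth of the intended bits and left the rest of the shadow APCB unfiltered. A self-contained userspace illustration (demo_bitmap_and is a hypothetical stand-in for the kernel helper; assumes 64-bit unsigned long):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the kernel's bitmap_and(): AND the
     * first nbits bits of s1 and s2 into dst (64-bit longs assumed). */
    static void demo_bitmap_and(unsigned long *dst, const unsigned long *s1,
                                const unsigned long *s2, unsigned int nbits)
    {
        unsigned int i, words = (nbits + 63) / 64;

        for (i = 0; i < words; i++)
            dst[i] = s1[i] & s2[i];
    }

    int main(void)
    {
        unsigned long s[4], h[4], out[4] = { -1UL, -1UL, -1UL, -1UL };

        memset(s, 0xff, sizeof(s));
        memset(h, 0x0f, sizeof(h));

        demo_bitmap_and(out, s, h, sizeof(s));       /* bug: 32 "bits" */
        printf("bytes as nbits: out[3] = %lx\n", out[3]); /* stale     */

        demo_bitmap_and(out, s, h, 8 * sizeof(s));   /* fix: 256 bits  */
        printf("bits as nbits:  out[3] = %lx\n", out[3]);
        return 0;
    }

The fix expresses the same byte-to-bit conversion in kernel style as BITS_PER_BYTE * sizeof(...).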
scb_s->mso = new_mso;
scb_s->prefix = new_prefix;
- /* We have to definetly flush the tlb if this scb never ran */
+ /* We definitely have to flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU)
scb_s->ihcpu = scb_o->ihcpu;
(vaddr & 0xfffffffffffff000UL) |
/* 52-53: store / fetch */
(((unsigned int) !write_flag) + 1) << 10,
- /* 62-63: asce id (alway primary == 0) */
+ /* 62-63: asce id (always primary == 0) */
.exc_access_id = 0, /* always primary */
.op_access_id = 0, /* not MVPG */
};
/**
* gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
*/
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
{
- if (ptl)
- spin_unlock(ptl);
+ pte_unmap_unlock(ptep, ptl);
}
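pte_unmap_unlock() pairs with pte_offset_map_lock(), which both maps the pte page and takes the page table lock, and which can now fail if the page table vanished concurrently. A sketch of the caller pattern the remaining hunks convert to (context assumed from gmap.c; error handling abbreviated):

    pte_t *ptep;
    spinlock_t *ptl;

    ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    if (!ptep)
        return -EAGAIN;         /* page table went away; caller retries */
    /* ... inspect or modify *ptep while holding ptl ... */
    gmap_pte_op_end(ptep, ptl); /* i.e. pte_unmap_unlock(ptep, ptl) */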
/**
{
int rc;
pte_t *ptep;
- spinlock_t *ptl = NULL;
+ spinlock_t *ptl;
unsigned long pbits = 0;
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
/* Protect and unlock. */
rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
- gmap_pte_op_end(ptl);
+ gmap_pte_op_end(ptep, ptl);
return rc;
}
/* Do *NOT* clear the _PAGE_INVALID bit! */
rc = 0;
}
- gmap_pte_op_end(ptl);
+ gmap_pte_op_end(ptep, ptl);
}
if (!rc)
break;
if (!rc)
gmap_insert_rmap(sg, vmaddr, rmap);
spin_unlock(&sg->guest_table_lock);
- gmap_pte_op_end(ptl);
+ gmap_pte_op_end(ptep, ptl);
}
radix_tree_preload_end();
if (rc) {
* The r2t parameter specifies the address of the source table. The
* four pages of the source table are made read-only in the parent gmap
* address space. A write to the source table area @r2t will automatically
- * remove the shadow r2 table and all of its decendents.
+ * remove the shadow r2 table and all of its descendants.
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
if (!tptep) {
spin_unlock(&sg->guest_table_lock);
- gmap_pte_op_end(ptl);
+ gmap_pte_op_end(sptep, ptl);
radix_tree_preload_end();
break;
}
rmap = NULL;
rc = 0;
}
- gmap_pte_op_end(ptl);
+ gmap_pte_op_end(sptep, ptl);
spin_unlock(&sg->guest_table_lock);
}
radix_tree_preload_end();
continue;
if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
set_bit(i, bitmap);
- spin_unlock(ptl);
+ pte_unmap_unlock(ptep, ptl);
}
}
gmap_pmd_op_end(gmap, pmdp);
* Remove all empty zero pages from the mapping for lazy refaulting
* - This must be called after mm->context.has_pgste is set, to avoid
* future creation of zero pages
- * - This must be called after THP was enabled
+ * - This must be called after THP was disabled.
+ *
+ * mm contracts with s390 that, even if mm were to remove a page table
+ * racing with the loop below and so cause pte_offset_map_lock() to fail,
+ * it will never insert a page table containing empty zero pages once
+ * mm_forbids_zeropage(mm), i.e. mm->context.has_pgste, is set.
*/
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
unsigned long end, struct mm_walk *walk)
spinlock_t *ptl;
ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!ptep)
+ break;
if (is_zero_pfn(pte_pfn(*ptep)))
ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
pte_unmap_unlock(ptep, ptl);
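Putting the pieces together, a consolidated sketch of the fixed walker body (context assumed from __zap_zero_pages()): a NULL return means the page table was removed under us, and by the contract quoted above no replacement table can contain zero pages, so abandoning the rest of this pmd range is safe:

    for (addr = start; addr != end; addr += PAGE_SIZE) {
        ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        if (!ptep)
            break;  /* table gone; no new zero pages can appear */
        if (is_zero_pfn(pte_pfn(*ptep)))
            ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
        pte_unmap_unlock(ptep, ptl);
    }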
If unsure, say N.
- config ZCRYPT_MULTIDEVNODES
- bool "Support for multiple zcrypt device nodes"
- default y
- depends on S390
- depends on ZCRYPT
- help
- With this option enabled the zcrypt device driver can
- provide multiple devices nodes in /dev. Each device
- node can get customized to limit access and narrow
- down the use of the available crypto hardware.
-
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/aspeed/Kconfig"
+source "drivers/crypto/starfive/Kconfig"
endif # CRYPTO_HW
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
+ #include <linux/io.h>
#include <asm/extmem.h>
- #include <asm/io.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
&kaddr, NULL);
if (rc < 0)
- return rc;
+ return dax_mem2blk_err(rc);
+
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
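dax_direct_access() reports poisoned pmem as a DAX-layer errno that block-layer callers do not understand, and dax_mem2blk_err() translates it. A paraphrased sketch of the helper, from memory; the authoritative definition lives in include/linux/dax.h:

    /* Assumed semantics: map the DAX poison errno to the -EIO that
     * block-layer callers expect; pass other errors through. */
    static inline int dax_mem2blk_err(int err)
    {
        return (err == -EHWPOISON) ? -EIO : err;
    }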
#include <linux/netdevice.h>
#include <net/dst.h>
- #include <linux/io.h> /* instead of <asm/io.h> ok ? */
- #include <asm/ccwdev.h>
- #include <asm/ccwgroup.h>
- #include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
- #include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
+ #include <linux/io.h>
+ #include <linux/bitops.h>
+ #include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
+ #include <asm/ccwdev.h>
+ #include <asm/ccwgroup.h>
#include <asm/idals.h>
#include "ctcm_main.h"
for (ct = 0; ct < len; ct++, ptr++, rptr++) {
if (sw == 0) {
- sprintf(addr, "%16.16llx", (__u64)rptr);
+ scnprintf(addr, sizeof(addr), "%16.16llx", (__u64)rptr);
- sprintf(boff, "%4.4X", (__u32)ct);
+ scnprintf(boff, sizeof(boff), "%4.4X", (__u32)ct);
bhex[0] = '\0';
basc[0] = '\0';
}
if (sw == 8)
strcat(bhex, " ");
- sprintf(tbuf, "%2.2llX", (__u64)*ptr);
+ scnprintf(tbuf, sizeof(tbuf), "%2.2llX", (__u64)*ptr);
tbuf[2] = '\0';
strcat(bhex, tbuf);
continue;
if ((strcmp(duphex, bhex)) != 0) {
if (dup != 0) {
- sprintf(tdup,
- "Duplicate as above to %s", addr);
+ scnprintf(tdup, sizeof(tdup),
+ "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n",
tdup);
}
strcat(basc, " ");
}
if (dup != 0) {
- sprintf(tdup, "Duplicate as above to %s", addr);
+ scnprintf(tdup, sizeof(tdup),
+ "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
} else {
if (dup >= 1) {
- sprintf(tdup, "Duplicate as above to %s", addr);
+ scnprintf(tdup, sizeof(tdup),
+ "Duplicate as above to %s", addr);
ctcm_pr_debug(" --- %s ---\n", tdup);
}
if (dup != 0) {
struct net_device *dev;
struct ctcm_priv *priv;
- sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
+ scnprintf(device, sizeof(device), "%s%i", MPC_DEVICE_NAME, port_num);
dev = __dev_get_by_name(&init_net, device);
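All of the conversions above replace unbounded sprintf() with scnprintf(), which takes the destination size and, unlike snprintf(), returns the number of characters actually written rather than the number that would have been written, so the return value is safe to use as a buffer cursor. A userspace approximation of the kernel helper from lib/vsprintf.c:

    #include <stdarg.h>
    #include <stdio.h>

    /* Approximation: vsnprintf() bounded write, with the return value
     * clamped to what actually landed in buf (excluding the NUL). */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (size == 0)
            return 0;
        return (i >= (int)size) ? (int)(size - 1) : i;
    }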