// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

#include "kvm_mm.h"

struct kvm_gmem {
	struct kvm *kvm;
	struct xarray bindings;
	struct list_head entry;
};

/**
 * folio_file_pfn - like folio_file_page, but return a pfn.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Return: The pfn for this index.
 */
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{
	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}

static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
	kvm_pfn_t pfn = folio_file_pfn(folio, index);
	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
	if (rc) {
		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
				    index, gfn, pfn, rc);
		return rc;
	}
#endif

	return 0;
}

static inline void kvm_gmem_mark_prepared(struct folio *folio)
{
	folio_mark_uptodate(folio);
}

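/*
 * Note: the folio up-to-date flag doubles as the "prepared" marker;
 * __kvm_gmem_get_pfn() reads it back via folio_test_uptodate() to decide
 * whether kvm_gmem_prepare_folio() still needs to run for this folio.
 */
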
/*
 * Process @folio, which contains @gfn, so that the guest can use it.
 * The folio must be locked and the gfn must be contained in @slot.
 * On successful return the guest sees a zero page so as to avoid
 * leaking host data and the up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				  gfn_t gfn, struct folio *folio)
{
	unsigned long nr_pages, i;
	pgoff_t index;
	int r;

	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		clear_highpage(folio_page(folio, i));

	/*
	 * Preparing huge folios should always be safe, since it should
	 * be possible to split them later if needed.
	 *
	 * Right now the folio order is always going to be zero, but the
	 * code is ready for huge folios. The only assumption is that
	 * the base pgoff of memslots is naturally aligned with the
	 * requested page order, ensuring that huge folios can also use
	 * huge page table entries for GPA->HPA mapping.
	 *
	 * The order will be passed when creating the guest_memfd, and
	 * checked when creating memslots.
	 */
	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
	index = gfn - slot->base_gfn + slot->gmem.pgoff;
	index = ALIGN_DOWN(index, 1 << folio_order(folio));
	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
	if (!r)
		kvm_gmem_mark_prepared(folio);

	return r;
}

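/*
 * Worked example (illustrative values, not from the original source): with
 * slot->base_gfn == 0x1000 and slot->gmem.pgoff == 0x400, gfn 0x1234 yields
 * index 0x634; for a hypothetical order-9 folio, ALIGN_DOWN() rounds this to
 * 0x600 before __kvm_gmem_prepare_folio() is called.
 */
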
/*
 * Returns a locked folio on success. The caller is responsible for
 * setting the up-to-date flag before the memory is mapped into the guest.
 * There is no backing storage for the memory, so the folio will remain
 * up-to-date until it's removed.
 *
 * Ignore accessed, referenced, and dirty flags. The memory is
 * unevictable and there is no storage to write back to.
 */
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
	/* TODO: Support huge pages. */
	return filemap_grab_folio(inode->i_mapping, index);
}

static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

		struct kvm_gfn_range gfn_range = {
			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
			/* guest memfd is relevant to only private mappings. */
			.attr_filter = KVM_FILTER_PRIVATE,
		};

		if (!found_memslot) {
			found_memslot = true;

			KVM_MMU_LOCK(kvm);
			kvm_mmu_invalidate_begin(kvm);
		}

		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
}

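/*
 * Note: kvm_gmem_invalidate_begin() and kvm_gmem_invalidate_end() are used
 * as a pair, bracketing the operation that removes or poisons pages (hole
 * punch, release, memory failure), following the same begin/end protocol
 * KVM uses for mmu_notifier invalidations.
 */
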
static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				    pgoff_t end)
{
	struct kvm *kvm = gmem->kvm;

	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		KVM_MMU_LOCK(kvm);
		kvm_mmu_invalidate_end(kvm);
		KVM_MMU_UNLOCK(kvm);
	}
}

static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	struct kvm_gmem *gmem;

	/*
	 * Bindings must be stable across invalidation to ensure the start+end
	 * are balanced.
	 */
	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);

	return 0;
}

static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start, index, end;
	int r;

	/* Dedicated guest is immutable by default. */
	if (offset + len > i_size_read(inode))
		return -EINVAL;

	filemap_invalidate_lock_shared(mapping);

	start = offset >> PAGE_SHIFT;
	end = (offset + len) >> PAGE_SHIFT;

	r = 0;
	for (index = start; index < end; ) {
		struct folio *folio;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		folio = kvm_gmem_get_folio(inode, index);
		if (IS_ERR(folio)) {
			r = PTR_ERR(folio);
			break;
		}

		index = folio_next_index(folio);

		folio_unlock(folio);
		folio_put(folio);

		/* 64-bit only, wrapping the index should be impossible. */
		if (WARN_ON_ONCE(!index))
			break;

		cond_resched();
	}

	filemap_invalidate_unlock_shared(mapping);

	return r;
}

static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
			       loff_t len)
{
	int ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
		return -EINVAL;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
	else
		ret = kvm_gmem_allocate(file_inode(file), offset, len);

	if (!ret)
		file_modified(file);

	return ret;
}

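/*
 * Illustrative userspace usage (not part of this file): memory is
 * preallocated with
 *	fallocate(gmem_fd, FALLOC_FL_KEEP_SIZE, offset, len);
 * and freed again with
 *	fallocate(gmem_fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, offset, len);
 * offset and len must be page-aligned, and FALLOC_FL_KEEP_SIZE is mandatory
 * because the file size is fixed when the guest_memfd is created.
 */
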
static int kvm_gmem_release(struct inode *inode, struct file *file)
{
	struct kvm_gmem *gmem = file->private_data;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	/*
	 * Prevent concurrent attempts to *unbind* a memslot. This is the last
	 * reference to the file and thus no new bindings can be created, but
	 * dereferencing the slot for existing bindings needs to be protected
	 * against memslot updates, specifically so that unbind doesn't race
	 * and free the memslot (kvm_gmem_get_file() will return NULL).
	 *
	 * Since .release is called only when the reference count is zero,
	 * after which file_ref_get() and get_file_active() fail,
	 * kvm_gmem_get_pfn() cannot be using the file concurrently.
	 * file_ref_put() provides a full barrier, and get_file_active() the
	 * matching acquire barrier.
	 */
	mutex_lock(&kvm->slots_lock);

	filemap_invalidate_lock(inode->i_mapping);

	xa_for_each(&gmem->bindings, index, slot)
		WRITE_ONCE(slot->gmem.file, NULL);

	/*
	 * All in-flight operations are gone and new bindings can be created.
	 * Zap all SPTEs pointed at by this file. Do not free the backing
	 * memory, as its lifetime is associated with the inode, not the file.
	 */
	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

	filemap_invalidate_unlock(inode->i_mapping);

	mutex_unlock(&kvm->slots_lock);

	xa_destroy(&gmem->bindings);
	kfree(gmem);

	kvm_put_kvm(kvm);

	return 0;
}

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{
	/*
	 * Do not return slot->gmem.file if it has already been closed;
	 * there might be some time between the last fput() and when
	 * kvm_gmem_release() clears slot->gmem.file.
	 */
	return get_file_active(&slot->gmem.file);
}

static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return gfn - slot->base_gfn + slot->gmem.pgoff;
}

static struct file_operations kvm_gmem_fops = {
	.open		= generic_file_open,
	.release	= kvm_gmem_release,
	.fallocate	= kvm_gmem_fallocate,
};

void kvm_gmem_init(struct module *module)
{
	kvm_gmem_fops.owner = module;
}

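/*
 * kvm_gmem_fops is intentionally non-const: kvm_gmem_init() fills in .owner
 * at load time so that the module backing these file operations cannot be
 * unloaded while a guest_memfd file is still open.
 */
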
static int kvm_gmem_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{
	struct list_head *gmem_list = &mapping->i_private_list;
	struct kvm_gmem *gmem;
	pgoff_t start, end;

	filemap_invalidate_lock_shared(mapping);

	start = folio->index;
	end = start + folio_nr_pages(folio);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	/*
	 * Do not truncate the range, what action is taken in response to the
	 * error is userspace's decision (assuming the architecture supports
	 * gracefully handling memory errors). If/when the guest attempts to
	 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
	 * at which point KVM can either terminate the VM or propagate the
	 * error to userspace.
	 */

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock_shared(mapping);

	return MF_DELAYED;
}

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio)
{
	struct page *page = folio_page(folio, 0);
	kvm_pfn_t pfn = page_to_pfn(page);
	int order = folio_order(folio);

	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
}
#endif

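/*
 * The free_folio hook above gives the architecture a chance to reclaim the
 * PFN range when a folio is freed (e.g. on hole punch), for example to
 * transition encrypted private pages back to a host-usable state.
 */
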
static const struct address_space_operations kvm_gmem_aops = {
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= kvm_gmem_migrate_folio,
	.error_remove_folio	= kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
	.free_folio		= kvm_gmem_free_folio,
#endif
};

static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
			    struct kstat *stat, u32 request_mask,
			    unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *attr)
{
	return -EINVAL;
}

static const struct inode_operations kvm_gmem_iops = {
	.getattr	= kvm_gmem_getattr,
	.setattr	= kvm_gmem_setattr,
};

static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
{
	const char *anon_name = "[kvm-gmem]";
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int fd, err;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
	if (!gmem) {
		err = -ENOMEM;
		goto err_fd;
	}

	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
					 O_RDWR, NULL);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_gmem;
	}

	file->f_flags |= O_LARGEFILE;

	inode = file->f_inode;
	WARN_ON(file->f_mapping != inode->i_mapping);

	inode->i_private = (void *)(unsigned long)flags;
	inode->i_op = &kvm_gmem_iops;
	inode->i_mapping->a_ops = &kvm_gmem_aops;
	inode->i_mode |= S_IFREG;
	inode->i_size = size;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_inaccessible(inode->i_mapping);
	/* Unmovable mappings are supposed to be marked unevictable as well. */
	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

	kvm_get_kvm(kvm);
	gmem->kvm = kvm;
	xa_init(&gmem->bindings);
	list_add(&gmem->entry, &inode->i_mapping->i_private_list);

	fd_install(fd, file);
	return fd;

err_gmem:
	kfree(gmem);
err_fd:
	put_unused_fd(fd);
	return err;
}

int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{
	loff_t size = args->size;
	u64 flags = args->flags;
	u64 valid_flags = 0;

	if (flags & ~valid_flags)
		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))
		return -EINVAL;

	return __kvm_gmem_create(kvm, size, flags);
}

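/*
 * Illustrative creation from userspace (hypothetical values, not part of
 * this file):
 *
 *	struct kvm_create_guest_memfd args = {
 *		.size = guest_mem_bytes,	// non-zero, page-aligned
 *		.flags = 0,
 *	};
 *	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &args);
 *
 * The returned fd is later bound to a memslot via kvm_gmem_bind().
 */
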
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset)
{
	loff_t size = slot->npages << PAGE_SHIFT;
	unsigned long start, end;
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int r = -EINVAL;

	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

	file = fget(fd);
	if (!file)
		return -EBADF;

	if (file->f_op != &kvm_gmem_fops)
		goto err;

	gmem = file->private_data;
	if (gmem->kvm != kvm)
		goto err;

	inode = file_inode(file);

	if (offset < 0 || !PAGE_ALIGNED(offset) ||
	    offset + size > i_size_read(inode))
		goto err;

	filemap_invalidate_lock(inode->i_mapping);

	start = offset >> PAGE_SHIFT;
	end = start + slot->npages;

	if (!xa_empty(&gmem->bindings) &&
	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		filemap_invalidate_unlock(inode->i_mapping);
		goto err;
	}

	/*
	 * memslots of flag KVM_MEM_GUEST_MEMFD are immutable to change, so
	 * kvm_gmem_bind() must occur on a new memslot. Because the memslot
	 * is not visible yet, kvm_gmem_get_pfn() is guaranteed to see the file.
	 */
	WRITE_ONCE(slot->gmem.file, file);
	slot->gmem.pgoff = start;

	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
	filemap_invalidate_unlock(inode->i_mapping);

	/*
	 * Drop the reference to the file, even on success. The file pins KVM,
	 * not the other way 'round. Active bindings are invalidated if the
	 * file is closed before memslots are destroyed.
	 */
	r = 0;
err:
	fput(file);
	return r;
}

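/*
 * Illustrative binding from userspace (hypothetical values, not part of this
 * file): the guest_memfd is attached when the memslot is created, e.g.
 *
 *	struct kvm_userspace_memory_region2 region = {
 *		.slot = slot_id,
 *		.flags = KVM_MEM_GUEST_MEMFD,
 *		.guest_phys_addr = gpa,
 *		.memory_size = size,
 *		.userspace_addr = (__u64)shared_mapping,
 *		.guest_memfd = gmem_fd,
 *		.guest_memfd_offset = offset,	// page-aligned
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
 */
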
void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	unsigned long start = slot->gmem.pgoff;
	unsigned long end = start + slot->npages;
	struct kvm_gmem *gmem;
	struct file *file;

	/*
	 * Nothing to do if the underlying file was already closed (or is being
	 * closed right now), kvm_gmem_release() invalidates all bindings.
	 */
	file = kvm_gmem_get_file(slot);
	if (!file)
		return;

	gmem = file->private_data;

	filemap_invalidate_lock(file->f_mapping);
	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);

	/*
	 * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
	 * cannot see this memslot.
	 */
	WRITE_ONCE(slot->gmem.file, NULL);
	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
}

/* Returns a locked folio on success. */
static struct folio *__kvm_gmem_get_pfn(struct file *file,
					struct kvm_memory_slot *slot,
					pgoff_t index, kvm_pfn_t *pfn,
					bool *is_prepared, int *max_order)
{
	struct file *gmem_file = READ_ONCE(slot->gmem.file);
	struct kvm_gmem *gmem = file->private_data;
	struct folio *folio;

	if (file != gmem_file) {
		WARN_ON_ONCE(gmem_file);
		return ERR_PTR(-EFAULT);
	}

	if (xa_load(&gmem->bindings, index) != slot) {
		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
		return ERR_PTR(-EIO);
	}

	folio = kvm_gmem_get_folio(file_inode(file), index);
	if (IS_ERR(folio))
		return folio;

	if (folio_test_hwpoison(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EHWPOISON);
	}

	*pfn = folio_file_pfn(folio, index);
	if (max_order)
		*max_order = 0;

	*is_prepared = folio_test_uptodate(folio);
	return folio;
}

int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
		     int *max_order)
{
	pgoff_t index = kvm_gmem_get_index(slot, gfn);
	struct file *file = kvm_gmem_get_file(slot);
	struct folio *folio;
	bool is_prepared = false;
	int r = 0;

	if (!file)
		return -EFAULT;

	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
	if (IS_ERR(folio)) {
		r = PTR_ERR(folio);
		goto out;
	}

	if (!is_prepared)
		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);

	folio_unlock(folio);

	if (!r)
		*page = folio_file_page(folio, index);
	else
		folio_put(folio);

out:
	fput(file);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);

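/*
 * Note: kvm_gmem_get_pfn() is the arch-facing entry point; architecture
 * fault-handling code (e.g. the x86 private-memory fault path) is expected
 * to call it to resolve a private gfn into a prepared, refcounted page
 * before mapping it into the guest.
 */
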
#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque)
{
	struct file *file;
	struct kvm_memory_slot *slot;
	void __user *p;

	int ret = 0, max_order;
	long i;

	lockdep_assert_held(&kvm->slots_lock);
	if (npages < 0)
		return -EINVAL;

	slot = gfn_to_memslot(kvm, start_gfn);
	if (!kvm_slot_can_be_private(slot))
		return -EINVAL;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;

	filemap_invalidate_lock(file->f_mapping);

	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
	for (i = 0; i < npages; i += (1 << max_order)) {
		struct folio *folio;
		gfn_t gfn = start_gfn + i;
		pgoff_t index = kvm_gmem_get_index(slot, gfn);
		bool is_prepared = false;
		kvm_pfn_t pfn;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		if (is_prepared) {
			folio_unlock(folio);
			folio_put(folio);
			ret = -EEXIST;
			break;
		}

		folio_unlock(folio);
		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
			(npages - i) < (1 << max_order));

		ret = -EINVAL;
		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
							KVM_MEMORY_ATTRIBUTE_PRIVATE,
							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
			if (!max_order)
				goto put_folio_and_exit;
			max_order--;
		}

		p = src ? src + i * PAGE_SIZE : NULL;
		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
		if (!ret)
			kvm_gmem_mark_prepared(folio);

put_folio_and_exit:
		folio_put(folio);
		if (ret)
			break;
	}

	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
	return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
#endif