2 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/init.h>
38 #include <linux/kthread.h>
39 #include <linux/sched/mm.h>
40 #include <linux/types.h>
41 #include <linux/list.h>
42 #include <linux/rbtree.h>
43 #include <linux/spinlock.h>
44 #include <linux/eventfd.h>
45 #include <linux/mdev.h>
46 #include <linux/debugfs.h>
48 #include <linux/nospec.h>
50 #include <drm/drm_edid.h>
53 #include "intel_gvt.h"
56 MODULE_IMPORT_NS(DMA_BUF);
57 MODULE_IMPORT_NS(I915_GVT);
59 /* helper macros copied from vfio-pci */
60 #define VFIO_PCI_OFFSET_SHIFT 40
61 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
62 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
63 #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
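/*
 * The file offset encodes both the target region and the offset within it:
 * the region index lives in the bits above VFIO_PCI_OFFSET_SHIFT and the low
 * 40 bits are the byte offset inside that region. For example, an access at
 * VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX) + 0x100 targets byte
 * 0x100 of the emulated BAR0.
 */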
65 #define EDID_BLOB_OFFSET (PAGE_SIZE/2)
67 #define OPREGION_SIGNATURE "IntelGraphicsMem"
70 struct intel_vgpu_regops {
71 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
72 size_t count, loff_t *ppos, bool iswrite);
73 void (*release)(struct intel_vgpu *vgpu,
74 struct vfio_region *region);
82 const struct intel_vgpu_regops *ops;
86 struct vfio_edid_region {
87 struct vfio_region_gfx_edid vfio_edid_regs;
93 struct hlist_node hnode;
97 struct intel_vgpu *vgpu;
98 struct rb_node gfn_node;
99 struct rb_node dma_addr_node;
106 #define vfio_dev_to_vgpu(vfio_dev) \
107 container_of((vfio_dev), struct intel_vgpu, vfio_device)
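/*
 * The two callbacks below are registered on KVM's page-track notifier in
 * intel_vgpu_open_device(); guest writes to gfns that GVT has write-protected
 * are forwarded to intel_vgpu_page_track_handler().
 */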
109 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
110 const u8 *val, int len,
111 struct kvm_page_track_notifier_node *node);
112 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
113 struct kvm_memory_slot *slot,
114 struct kvm_page_track_notifier_node *node);
116 static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
118 struct intel_vgpu_type *type =
119 container_of(mtype, struct intel_vgpu_type, type);
121 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
122 "fence: %d\nresolution: %s\n"
124 BYTES_TO_MB(type->conf->low_mm),
125 BYTES_TO_MB(type->conf->high_mm),
126 type->conf->fence, vgpu_edid_str(type->conf->edid),
130 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
133 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
134 DIV_ROUND_UP(size, PAGE_SIZE));
137 /* Pin a normal or compound guest page for DMA. */
138 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
139 unsigned long size, struct page **page)
141 int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
142 struct page *base_page = NULL;
147 * We pin the pages one-by-one to avoid allocating a big array
148 * on the stack to hold pfns.
150 for (npage = 0; npage < total_pages; npage++) {
151 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
152 struct page *cur_page;
154 ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
155 IOMMU_READ | IOMMU_WRITE, &cur_page);
157 gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
163 base_page = cur_page;
164 else if (base_page + npage != cur_page) {
165 gvt_vgpu_err("The pages are not contiguous\n");
175 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
179 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
180 dma_addr_t *dma_addr, unsigned long size)
182 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
183 struct page *page = NULL;
186 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
190 /* Setup DMA mapping. */
191 *dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
192 if (dma_mapping_error(dev, *dma_addr)) {
193 gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
194 page_to_pfn(page), ret);
195 gvt_unpin_guest_page(vgpu, gfn, size);
202 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
203 dma_addr_t dma_addr, unsigned long size)
205 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
207 dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL);
208 gvt_unpin_guest_page(vgpu, gfn, size);
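/*
 * Pinned guest pages are cached in two rbtrees per vGPU: gfn_cache is keyed
 * by guest frame number and dma_addr_cache by the mapped DMA address, so a
 * struct gvt_dma entry can be looked up from either direction.
 */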
211 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
214 struct rb_node *node = vgpu->dma_addr_cache.rb_node;
218 itr = rb_entry(node, struct gvt_dma, dma_addr_node);
220 if (dma_addr < itr->dma_addr)
221 node = node->rb_left;
222 else if (dma_addr > itr->dma_addr)
223 node = node->rb_right;
230 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
232 struct rb_node *node = vgpu->gfn_cache.rb_node;
236 itr = rb_entry(node, struct gvt_dma, gfn_node);
239 node = node->rb_left;
240 else if (gfn > itr->gfn)
241 node = node->rb_right;
248 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
249 dma_addr_t dma_addr, unsigned long size)
251 struct gvt_dma *new, *itr;
252 struct rb_node **link, *parent = NULL;
254 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
260 new->dma_addr = dma_addr;
262 kref_init(&new->ref);
264 /* gfn_cache maps gfn to struct gvt_dma. */
265 link = &vgpu->gfn_cache.rb_node;
268 itr = rb_entry(parent, struct gvt_dma, gfn_node);
271 link = &parent->rb_left;
273 link = &parent->rb_right;
275 rb_link_node(&new->gfn_node, parent, link);
276 rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
278 /* dma_addr_cache maps dma addr to struct gvt_dma. */
280 link = &vgpu->dma_addr_cache.rb_node;
283 itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
285 if (dma_addr < itr->dma_addr)
286 link = &parent->rb_left;
288 link = &parent->rb_right;
290 rb_link_node(&new->dma_addr_node, parent, link);
291 rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
293 vgpu->nr_cache_entries++;
297 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
298 struct gvt_dma *entry)
300 rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
301 rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
303 vgpu->nr_cache_entries--;
306 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
309 struct rb_node *node = NULL;
312 mutex_lock(&vgpu->cache_lock);
313 node = rb_first(&vgpu->gfn_cache);
315 mutex_unlock(&vgpu->cache_lock);
318 dma = rb_entry(node, struct gvt_dma, gfn_node);
319 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
320 __gvt_cache_remove_entry(vgpu, dma);
321 mutex_unlock(&vgpu->cache_lock);
325 static void gvt_cache_init(struct intel_vgpu *vgpu)
327 vgpu->gfn_cache = RB_ROOT;
328 vgpu->dma_addr_cache = RB_ROOT;
329 vgpu->nr_cache_entries = 0;
330 mutex_init(&vgpu->cache_lock);
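/*
 * The protect table is a hash of the gfns this vGPU currently has
 * write-protected; it mirrors what has been registered with KVM page
 * tracking via intel_gvt_page_track_add()/remove().
 */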
333 static void kvmgt_protect_table_init(struct intel_vgpu *info)
335 hash_init(info->ptable);
338 static void kvmgt_protect_table_destroy(struct intel_vgpu *info)
340 struct kvmgt_pgfn *p;
341 struct hlist_node *tmp;
344 hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
350 static struct kvmgt_pgfn *
351 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
353 struct kvmgt_pgfn *p, *res = NULL;
355 hash_for_each_possible(info->ptable, p, hnode, gfn) {
365 static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn)
367 struct kvmgt_pgfn *p;
369 p = __kvmgt_protect_table_find(info, gfn);
373 static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
375 struct kvmgt_pgfn *p;
377 if (kvmgt_gfn_is_write_protected(info, gfn))
380 p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
381 if (WARN(!p, "gfn: 0x%llx\n", gfn))
385 hash_add(info->ptable, &p->hnode, gfn);
388 static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn)
390 struct kvmgt_pgfn *p;
392 p = __kvmgt_protect_table_find(info, gfn);
399 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
400 size_t count, loff_t *ppos, bool iswrite)
402 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
403 VFIO_PCI_NUM_REGIONS;
404 void *base = vgpu->region[i].data;
405 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
408 if (pos >= vgpu->region[i].size || iswrite) {
409 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
412 count = min(count, (size_t)(vgpu->region[i].size - pos));
413 memcpy(buf, base + pos, count);
418 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
419 struct vfio_region *region)
423 static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
424 .rw = intel_vgpu_reg_rw_opregion,
425 .release = intel_vgpu_reg_release_opregion,
428 static int handle_edid_regs(struct intel_vgpu *vgpu,
429 struct vfio_edid_region *region, char *buf,
430 size_t count, u16 offset, bool is_write)
432 struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
435 if (offset + count > sizeof(*regs))
442 data = *((unsigned int *)buf);
444 case offsetof(struct vfio_region_gfx_edid, link_state):
445 if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
446 if (!drm_edid_block_valid(
447 (u8 *)region->edid_blob,
451 gvt_vgpu_err("invalid EDID blob\n");
454 intel_vgpu_emulate_hotplug(vgpu, true);
455 } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
456 intel_vgpu_emulate_hotplug(vgpu, false);
458 gvt_vgpu_err("invalid EDID link state %d\n",
462 regs->link_state = data;
464 case offsetof(struct vfio_region_gfx_edid, edid_size):
465 if (data > regs->edid_max_size) {
466 gvt_vgpu_err("EDID size is bigger than %d!\n",
467 regs->edid_max_size);
470 regs->edid_size = data;
474 gvt_vgpu_err("write read-only EDID region at offset %d\n",
479 memcpy(buf, (char *)regs + offset, count);
485 static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
486 size_t count, u16 offset, bool is_write)
488 if (offset + count > region->vfio_edid_regs.edid_size)
492 memcpy(region->edid_blob + offset, buf, count);
494 memcpy(buf, region->edid_blob + offset, count);
499 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
500 size_t count, loff_t *ppos, bool iswrite)
503 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
504 VFIO_PCI_NUM_REGIONS;
505 struct vfio_edid_region *region = vgpu->region[i].data;
506 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
508 if (pos < region->vfio_edid_regs.edid_offset) {
509 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
511 pos -= EDID_BLOB_OFFSET;
512 ret = handle_edid_blob(region, buf, count, pos, iswrite);
516 gvt_vgpu_err("failed to access EDID region\n");
521 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
522 struct vfio_region *region)
527 static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
528 .rw = intel_vgpu_reg_rw_edid,
529 .release = intel_vgpu_reg_release_edid,
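/*
 * Layout of the virtual EDID region: the vfio_region_gfx_edid control
 * registers sit at offset 0 and the EDID blob itself starts at
 * EDID_BLOB_OFFSET (half a page in), which is what edid_offset is set to in
 * intel_gvt_set_edid().
 */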
532 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
533 unsigned int type, unsigned int subtype,
534 const struct intel_vgpu_regops *ops,
535 size_t size, u32 flags, void *data)
537 struct vfio_region *region;
539 region = krealloc(vgpu->region,
540 (vgpu->num_regions + 1) * sizeof(*region),
545 vgpu->region = region;
546 vgpu->region[vgpu->num_regions].type = type;
547 vgpu->region[vgpu->num_regions].subtype = subtype;
548 vgpu->region[vgpu->num_regions].ops = ops;
549 vgpu->region[vgpu->num_regions].size = size;
550 vgpu->region[vgpu->num_regions].flags = flags;
551 vgpu->region[vgpu->num_regions].data = data;
556 int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
561 /* Each vgpu has its own opregion, although VFIO would create another
562 * one later. This one is used to expose the opregion to VFIO; the one
563 * created later by VFIO is the one the guest actually uses.
565 base = vgpu_opregion(vgpu)->va;
569 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
574 ret = intel_vgpu_register_reg(vgpu,
575 PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
576 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
577 &intel_vgpu_regops_opregion, OPREGION_SIZE,
578 VFIO_REGION_INFO_FLAG_READ, base);
583 int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
585 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
586 struct vfio_edid_region *base;
589 base = kzalloc(sizeof(*base), GFP_KERNEL);
593 /* TODO: Add multi-port and EDID extension block support */
594 base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
595 base->vfio_edid_regs.edid_max_size = EDID_SIZE;
596 base->vfio_edid_regs.edid_size = EDID_SIZE;
597 base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
598 base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
599 base->edid_blob = port->edid->edid_block;
601 ret = intel_vgpu_register_reg(vgpu,
602 VFIO_REGION_TYPE_GFX,
603 VFIO_REGION_SUBTYPE_GFX_EDID,
604 &intel_vgpu_regops_edid, EDID_SIZE,
605 VFIO_REGION_INFO_FLAG_READ |
606 VFIO_REGION_INFO_FLAG_WRITE |
607 VFIO_REGION_INFO_FLAG_CAPS, base);
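/*
 * intel_vgpu_dma_unmap() is the vfio_device_ops dma_unmap callback: when an
 * IOVA range is invalidated, any pages GVT still has pinned and mapped in
 * that range are unmapped and dropped from the cache.
 */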
612 static void intel_vgpu_dma_unmap(struct vfio_device *vfio_dev, u64 iova,
615 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
616 struct gvt_dma *entry;
617 u64 iov_pfn = iova >> PAGE_SHIFT;
618 u64 end_iov_pfn = iov_pfn + length / PAGE_SIZE;
620 mutex_lock(&vgpu->cache_lock);
621 for (; iov_pfn < end_iov_pfn; iov_pfn++) {
622 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
626 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
628 __gvt_cache_remove_entry(vgpu, entry);
630 mutex_unlock(&vgpu->cache_lock);
633 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
635 struct intel_vgpu *itr;
639 mutex_lock(&vgpu->gvt->lock);
640 for_each_active_vgpu(vgpu->gvt, itr, id) {
641 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
644 if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
650 mutex_unlock(&vgpu->gvt->lock);
654 static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
656 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
658 if (!vgpu->vfio_device.kvm ||
659 vgpu->vfio_device.kvm->mm != current->mm) {
660 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
664 if (__kvmgt_vgpu_exist(vgpu))
667 vgpu->track_node.track_write = kvmgt_page_track_write;
668 vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
669 kvm_get_kvm(vgpu->vfio_device.kvm);
670 kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
673 set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
675 debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
676 &vgpu->nr_cache_entries);
678 intel_gvt_activate_vgpu(vgpu);
683 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
685 struct eventfd_ctx *trigger;
687 trigger = vgpu->msi_trigger;
689 eventfd_ctx_put(trigger);
690 vgpu->msi_trigger = NULL;
694 static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
696 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
698 intel_gvt_release_vgpu(vgpu);
700 clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
702 debugfs_lookup_and_remove(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs);
704 kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
706 kvm_put_kvm(vgpu->vfio_device.kvm);
708 kvmgt_protect_table_destroy(vgpu);
709 gvt_cache_destroy(vgpu);
711 WARN_ON(vgpu->nr_cache_entries);
713 vgpu->gfn_cache = RB_ROOT;
714 vgpu->dma_addr_cache = RB_ROOT;
716 intel_vgpu_release_msi_eventfd_ctx(vgpu);
719 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
721 u32 start_lo, start_hi;
724 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
725 PCI_BASE_ADDRESS_MEM_MASK;
726 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
727 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
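/* A 64-bit memory BAR spans two consecutive config space dwords; 32-bit and
 * 1M BARs use only the low dword, so only start_lo contributes to the
 * address. */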
730 case PCI_BASE_ADDRESS_MEM_TYPE_64:
731 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
734 case PCI_BASE_ADDRESS_MEM_TYPE_32:
735 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
736 /* 1M mem BAR treated as 32-bit BAR */
738 /* unknown mem type treated as 32-bit BAR */
743 return ((u64)start_hi << 32) | start_lo;
746 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
747 void *buf, unsigned int count, bool is_write)
749 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
753 ret = intel_vgpu_emulate_mmio_write(vgpu,
754 bar_start + off, buf, count);
756 ret = intel_vgpu_emulate_mmio_read(vgpu,
757 bar_start + off, buf, count);
761 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
763 return off >= vgpu_aperture_offset(vgpu) &&
764 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
767 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
768 void *buf, unsigned long count, bool is_write)
770 void __iomem *aperture_va;
772 if (!intel_vgpu_in_aperture(vgpu, off) ||
773 !intel_vgpu_in_aperture(vgpu, off + count)) {
774 gvt_vgpu_err("Invalid aperture offset %llu\n", off);
778 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
779 ALIGN_DOWN(off, PAGE_SIZE),
780 count + offset_in_page(off));
785 memcpy_toio(aperture_va + offset_in_page(off), buf, count);
787 memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
789 io_mapping_unmap(aperture_va);
794 static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
795 size_t count, loff_t *ppos, bool is_write)
797 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
798 u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
802 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
803 gvt_vgpu_err("invalid index: %u\n", index);
808 case VFIO_PCI_CONFIG_REGION_INDEX:
810 ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
813 ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
816 case VFIO_PCI_BAR0_REGION_INDEX:
817 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
818 buf, count, is_write);
820 case VFIO_PCI_BAR2_REGION_INDEX:
821 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
823 case VFIO_PCI_BAR1_REGION_INDEX:
824 case VFIO_PCI_BAR3_REGION_INDEX:
825 case VFIO_PCI_BAR4_REGION_INDEX:
826 case VFIO_PCI_BAR5_REGION_INDEX:
827 case VFIO_PCI_VGA_REGION_INDEX:
828 case VFIO_PCI_ROM_REGION_INDEX:
831 if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
834 index -= VFIO_PCI_NUM_REGIONS;
835 return vgpu->region[index].ops->rw(vgpu, buf, count,
839 return ret == 0 ? count : ret;
842 static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
844 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
845 struct intel_gvt *gvt = vgpu->gvt;
848 /* Only allow MMIO GGTT entry access */
849 if (index != PCI_BASE_ADDRESS_0)
852 offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
853 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
855 return (offset >= gvt->device_info.gtt_start_offset &&
856 offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
860 static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
861 size_t count, loff_t *ppos)
863 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
864 unsigned int done = 0;
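/* Reads are split into the largest naturally aligned chunks the emulation
 * path accepts: 8 bytes only for GGTT entries, otherwise 4, 2 or 1 bytes,
 * looping until the requested count is exhausted. */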
870 /* Only support 8-byte GGTT entry reads */
871 if (count >= 8 && !(*ppos % 8) &&
872 gtt_entry(vgpu, ppos)) {
875 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
880 if (copy_to_user(buf, &val, sizeof(val)))
884 } else if (count >= 4 && !(*ppos % 4)) {
887 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
892 if (copy_to_user(buf, &val, sizeof(val)))
896 } else if (count >= 2 && !(*ppos % 2)) {
899 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
904 if (copy_to_user(buf, &val, sizeof(val)))
911 ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
916 if (copy_to_user(buf, &val, sizeof(val)))
934 static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
935 const char __user *buf,
936 size_t count, loff_t *ppos)
938 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
939 unsigned int done = 0;
945 /* Only support 8-byte GGTT entry writes */
946 if (count >= 8 && !(*ppos % 8) &&
947 gtt_entry(vgpu, ppos)) {
950 if (copy_from_user(&val, buf, sizeof(val)))
953 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
959 } else if (count >= 4 && !(*ppos % 4)) {
962 if (copy_from_user(&val, buf, sizeof(val)))
965 ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
971 } else if (count >= 2 && !(*ppos % 2)) {
974 if (copy_from_user(&val, buf, sizeof(val)))
977 ret = intel_vgpu_rw(vgpu, (char *)&val,
978 sizeof(val), ppos, true);
986 if (copy_from_user(&val, buf, sizeof(val)))
989 ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
1008 static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
1009 struct vm_area_struct *vma)
1011 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1014 unsigned long req_size, pgoff, req_start;
1017 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
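/* Only the part of BAR2 that maps the vGPU's GM aperture is backed by real
 * memory and may be mmap'ed; every other region must be accessed through
 * read()/write() emulation. */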
1018 if (index >= VFIO_PCI_ROM_REGION_INDEX)
1021 if (vma->vm_end < vma->vm_start)
1023 if ((vma->vm_flags & VM_SHARED) == 0)
1025 if (index != VFIO_PCI_BAR2_REGION_INDEX)
1028 pg_prot = vma->vm_page_prot;
1029 virtaddr = vma->vm_start;
1030 req_size = vma->vm_end - vma->vm_start;
1031 pgoff = vma->vm_pgoff &
1032 ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1033 req_start = pgoff << PAGE_SHIFT;
1035 if (!intel_vgpu_in_aperture(vgpu, req_start))
1037 if (req_start + req_size >
1038 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1041 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1043 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1046 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1048 if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
1054 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1055 unsigned int index, unsigned int start,
1056 unsigned int count, u32 flags,
1062 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1063 unsigned int index, unsigned int start,
1064 unsigned int count, u32 flags, void *data)
1069 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1070 unsigned int index, unsigned int start, unsigned int count,
1071 u32 flags, void *data)
1076 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1077 unsigned int index, unsigned int start, unsigned int count,
1078 u32 flags, void *data)
1080 struct eventfd_ctx *trigger;
1082 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1083 int fd = *(int *)data;
1085 trigger = eventfd_ctx_fdget(fd);
1086 if (IS_ERR(trigger)) {
1087 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
1088 return PTR_ERR(trigger);
1090 vgpu->msi_trigger = trigger;
1091 } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
1092 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1097 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1098 unsigned int index, unsigned int start, unsigned int count,
1101 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1102 unsigned int start, unsigned int count, u32 flags,
1106 case VFIO_PCI_INTX_IRQ_INDEX:
1107 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1108 case VFIO_IRQ_SET_ACTION_MASK:
1109 func = intel_vgpu_set_intx_mask;
1111 case VFIO_IRQ_SET_ACTION_UNMASK:
1112 func = intel_vgpu_set_intx_unmask;
1114 case VFIO_IRQ_SET_ACTION_TRIGGER:
1115 func = intel_vgpu_set_intx_trigger;
1119 case VFIO_PCI_MSI_IRQ_INDEX:
1120 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1121 case VFIO_IRQ_SET_ACTION_MASK:
1122 case VFIO_IRQ_SET_ACTION_UNMASK:
1123 /* XXX Need masking support exported */
1125 case VFIO_IRQ_SET_ACTION_TRIGGER:
1126 func = intel_vgpu_set_msi_trigger;
1135 return func(vgpu, index, start, count, flags, data);
1138 static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
1141 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1142 unsigned long minsz;
1144 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1146 if (cmd == VFIO_DEVICE_GET_INFO) {
1147 struct vfio_device_info info;
1149 minsz = offsetofend(struct vfio_device_info, num_irqs);
1151 if (copy_from_user(&info, (void __user *)arg, minsz))
1154 if (info.argsz < minsz)
1157 info.flags = VFIO_DEVICE_FLAGS_PCI;
1158 info.flags |= VFIO_DEVICE_FLAGS_RESET;
1159 info.num_regions = VFIO_PCI_NUM_REGIONS +
1161 info.num_irqs = VFIO_PCI_NUM_IRQS;
1163 return copy_to_user((void __user *)arg, &info, minsz) ?
1166 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1167 struct vfio_region_info info;
1168 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1171 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1175 minsz = offsetofend(struct vfio_region_info, offset);
1177 if (copy_from_user(&info, (void __user *)arg, minsz))
1180 if (info.argsz < minsz)
1183 switch (info.index) {
1184 case VFIO_PCI_CONFIG_REGION_INDEX:
1185 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1186 info.size = vgpu->gvt->device_info.cfg_space_size;
1187 info.flags = VFIO_REGION_INFO_FLAG_READ |
1188 VFIO_REGION_INFO_FLAG_WRITE;
1190 case VFIO_PCI_BAR0_REGION_INDEX:
1191 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1192 info.size = vgpu->cfg_space.bar[info.index].size;
1198 info.flags = VFIO_REGION_INFO_FLAG_READ |
1199 VFIO_REGION_INFO_FLAG_WRITE;
1201 case VFIO_PCI_BAR1_REGION_INDEX:
1202 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1206 case VFIO_PCI_BAR2_REGION_INDEX:
1207 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1208 info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1209 VFIO_REGION_INFO_FLAG_MMAP |
1210 VFIO_REGION_INFO_FLAG_READ |
1211 VFIO_REGION_INFO_FLAG_WRITE;
1212 info.size = gvt_aperture_sz(vgpu->gvt);
1214 sparse = kzalloc(struct_size(sparse, areas, nr_areas),
1219 sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1220 sparse->header.version = 1;
1221 sparse->nr_areas = nr_areas;
1222 cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1223 sparse->areas[0].offset =
1224 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1225 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
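/* Only the vGPU's slice of the aperture is announced as mmap-able through
 * the sparse mmap capability; the rest of BAR2 is trapped and emulated. */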
1228 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1229 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1233 gvt_dbg_core("get region info bar:%d\n", info.index);
1236 case VFIO_PCI_ROM_REGION_INDEX:
1237 case VFIO_PCI_VGA_REGION_INDEX:
1238 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1242 gvt_dbg_core("get region info index:%d\n", info.index);
1246 struct vfio_region_info_cap_type cap_type = {
1247 .header.id = VFIO_REGION_INFO_CAP_TYPE,
1248 .header.version = 1 };
1250 if (info.index >= VFIO_PCI_NUM_REGIONS +
1254 array_index_nospec(info.index,
1255 VFIO_PCI_NUM_REGIONS +
1258 i = info.index - VFIO_PCI_NUM_REGIONS;
1261 VFIO_PCI_INDEX_TO_OFFSET(info.index);
1262 info.size = vgpu->region[i].size;
1263 info.flags = vgpu->region[i].flags;
1265 cap_type.type = vgpu->region[i].type;
1266 cap_type.subtype = vgpu->region[i].subtype;
1268 ret = vfio_info_add_capability(&caps,
1276 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1277 switch (cap_type_id) {
1278 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1279 ret = vfio_info_add_capability(&caps,
1281 struct_size(sparse, areas,
1295 info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1296 if (info.argsz < sizeof(info) + caps.size) {
1297 info.argsz = sizeof(info) + caps.size;
1298 info.cap_offset = 0;
1300 vfio_info_cap_shift(&caps, sizeof(info));
1301 if (copy_to_user((void __user *)arg +
1302 sizeof(info), caps.buf,
1308 info.cap_offset = sizeof(info);
1315 return copy_to_user((void __user *)arg, &info, minsz) ?
1317 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1318 struct vfio_irq_info info;
1320 minsz = offsetofend(struct vfio_irq_info, count);
1322 if (copy_from_user(&info, (void __user *)arg, minsz))
1325 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1328 switch (info.index) {
1329 case VFIO_PCI_INTX_IRQ_INDEX:
1330 case VFIO_PCI_MSI_IRQ_INDEX:
1336 info.flags = VFIO_IRQ_INFO_EVENTFD;
1338 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1340 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1341 info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1342 VFIO_IRQ_INFO_AUTOMASKED);
1344 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1346 return copy_to_user((void __user *)arg, &info, minsz) ?
1348 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1349 struct vfio_irq_set hdr;
1352 size_t data_size = 0;
1354 minsz = offsetofend(struct vfio_irq_set, count);
1356 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1359 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1360 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1362 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1363 VFIO_PCI_NUM_IRQS, &data_size);
1365 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1369 data = memdup_user((void __user *)(arg + minsz),
1372 return PTR_ERR(data);
1376 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1377 hdr.start, hdr.count, data);
1381 } else if (cmd == VFIO_DEVICE_RESET) {
1382 intel_gvt_reset_vgpu(vgpu);
1384 } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1385 struct vfio_device_gfx_plane_info dmabuf;
1388 minsz = offsetofend(struct vfio_device_gfx_plane_info,
1390 if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1392 if (dmabuf.argsz < minsz)
1395 ret = intel_vgpu_query_plane(vgpu, &dmabuf);
1399 return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1401 } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1404 if (get_user(dmabuf_id, (__u32 __user *)arg))
1406 return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
1413 vgpu_id_show(struct device *dev, struct device_attribute *attr,
1416 struct intel_vgpu *vgpu = dev_get_drvdata(dev);
1418 return sprintf(buf, "%d\n", vgpu->id);
1421 static DEVICE_ATTR_RO(vgpu_id);
1423 static struct attribute *intel_vgpu_attrs[] = {
1424 &dev_attr_vgpu_id.attr,
1428 static const struct attribute_group intel_vgpu_group = {
1429 .name = "intel_vgpu",
1430 .attrs = intel_vgpu_attrs,
1433 static const struct attribute_group *intel_vgpu_groups[] = {
1438 static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
1440 struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
1441 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1442 struct intel_vgpu_type *type =
1443 container_of(mdev->type, struct intel_vgpu_type, type);
1446 vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
1447 ret = intel_gvt_create_vgpu(vgpu, type->conf);
1451 kvmgt_protect_table_init(vgpu);
1452 gvt_cache_init(vgpu);
1457 static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
1459 struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
1461 intel_gvt_destroy_vgpu(vgpu);
1464 static const struct vfio_device_ops intel_vgpu_dev_ops = {
1465 .init = intel_vgpu_init_dev,
1466 .release = intel_vgpu_release_dev,
1467 .open_device = intel_vgpu_open_device,
1468 .close_device = intel_vgpu_close_device,
1469 .read = intel_vgpu_read,
1470 .write = intel_vgpu_write,
1471 .mmap = intel_vgpu_mmap,
1472 .ioctl = intel_vgpu_ioctl,
1473 .dma_unmap = intel_vgpu_dma_unmap,
1474 .bind_iommufd = vfio_iommufd_emulated_bind,
1475 .unbind_iommufd = vfio_iommufd_emulated_unbind,
1476 .attach_ioas = vfio_iommufd_emulated_attach_ioas,
1479 static int intel_vgpu_probe(struct mdev_device *mdev)
1481 struct intel_vgpu *vgpu;
1484 vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
1485 &intel_vgpu_dev_ops);
1487 gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
1488 return PTR_ERR(vgpu);
1491 dev_set_drvdata(&mdev->dev, vgpu);
1492 ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
1496 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
1497 dev_name(mdev_dev(mdev)));
1501 vfio_put_device(&vgpu->vfio_device);
1505 static void intel_vgpu_remove(struct mdev_device *mdev)
1507 struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
1509 vfio_unregister_group_dev(&vgpu->vfio_device);
1510 vfio_put_device(&vgpu->vfio_device);
1513 static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
1515 struct intel_vgpu_type *type =
1516 container_of(mtype, struct intel_vgpu_type, type);
1517 struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
1518 unsigned int low_gm_avail, high_gm_avail, fence_avail;
1520 mutex_lock(&gvt->lock);
1521 low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
1522 gvt->gm.vgpu_allocated_low_gm_size;
1523 high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
1524 gvt->gm.vgpu_allocated_high_gm_size;
1525 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
1526 gvt->fence.vgpu_allocated_fence_num;
1527 mutex_unlock(&gvt->lock);
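/* The number of instances still creatable is bounded by whichever resource
 * runs out first: low GM, high GM or fence registers. */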
1529 return min3(low_gm_avail / type->conf->low_mm,
1530 high_gm_avail / type->conf->high_mm,
1531 fence_avail / type->conf->fence);
1534 static struct mdev_driver intel_vgpu_mdev_driver = {
1535 .device_api = VFIO_DEVICE_API_PCI_STRING,
1537 .name = "intel_vgpu_mdev",
1538 .owner = THIS_MODULE,
1539 .dev_groups = intel_vgpu_groups,
1541 .probe = intel_vgpu_probe,
1542 .remove = intel_vgpu_remove,
1543 .get_available = intel_vgpu_get_available,
1544 .show_description = intel_vgpu_show_description,
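/*
 * Page-track plumbing: intel_gvt_page_track_add()/remove() write-protect or
 * release a guest frame through KVM's page-track API, while the protect
 * table above records which gfns this vGPU currently tracks.
 */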
1547 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
1549 struct kvm *kvm = info->vfio_device.kvm;
1550 struct kvm_memory_slot *slot;
1553 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
1556 idx = srcu_read_lock(&kvm->srcu);
1557 slot = gfn_to_memslot(kvm, gfn);
1559 srcu_read_unlock(&kvm->srcu, idx);
1563 write_lock(&kvm->mmu_lock);
1565 if (kvmgt_gfn_is_write_protected(info, gfn))
1568 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1569 kvmgt_protect_table_add(info, gfn);
1572 write_unlock(&kvm->mmu_lock);
1573 srcu_read_unlock(&kvm->srcu, idx);
1577 int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
1579 struct kvm *kvm = info->vfio_device.kvm;
1580 struct kvm_memory_slot *slot;
1583 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
1586 idx = srcu_read_lock(&kvm->srcu);
1587 slot = gfn_to_memslot(kvm, gfn);
1589 srcu_read_unlock(&kvm->srcu, idx);
1593 write_lock(&kvm->mmu_lock);
1595 if (!kvmgt_gfn_is_write_protected(info, gfn))
1598 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1599 kvmgt_protect_table_del(info, gfn);
1602 write_unlock(&kvm->mmu_lock);
1603 srcu_read_unlock(&kvm->srcu, idx);
1607 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1608 const u8 *val, int len,
1609 struct kvm_page_track_notifier_node *node)
1611 struct intel_vgpu *info =
1612 container_of(node, struct intel_vgpu, track_node);
1614 if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1615 intel_vgpu_page_track_handler(info, gpa,
1619 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1620 struct kvm_memory_slot *slot,
1621 struct kvm_page_track_notifier_node *node)
1625 struct intel_vgpu *info =
1626 container_of(node, struct intel_vgpu, track_node);
1628 write_lock(&kvm->mmu_lock);
1629 for (i = 0; i < slot->npages; i++) {
1630 gfn = slot->base_gfn + i;
1631 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1632 kvm_slot_page_track_remove_page(kvm, slot, gfn,
1633 KVM_PAGE_TRACK_WRITE);
1634 kvmgt_protect_table_del(info, gfn);
1637 write_unlock(&kvm->mmu_lock);
1640 void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
1647 for (i = 0; i < vgpu->num_regions; i++)
1648 if (vgpu->region[i].ops->release)
1649 vgpu->region[i].ops->release(vgpu,
1651 vgpu->num_regions = 0;
1652 kfree(vgpu->region);
1653 vgpu->region = NULL;
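/*
 * DMA mapping entry points used by the GTT code. Each cached mapping is kref
 * counted: mapping the same gfn again just takes another reference and
 * returns the stored DMA address, and intel_gvt_dma_unmap_guest_page() only
 * really unmaps once the last reference is dropped.
 */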
1656 int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
1657 unsigned long size, dma_addr_t *dma_addr)
1659 struct gvt_dma *entry;
1662 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1665 mutex_lock(&vgpu->cache_lock);
1667 entry = __gvt_cache_find_gfn(vgpu, gfn);
1669 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1673 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1676 } else if (entry->size != size) {
1677 /* the same gfn with different size: unmap and re-map */
1678 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1679 __gvt_cache_remove_entry(vgpu, entry);
1681 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1685 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1689 kref_get(&entry->ref);
1690 *dma_addr = entry->dma_addr;
1693 mutex_unlock(&vgpu->cache_lock);
1697 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1699 mutex_unlock(&vgpu->cache_lock);
1703 int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
1705 struct gvt_dma *entry;
1708 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1711 mutex_lock(&vgpu->cache_lock);
1712 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1714 kref_get(&entry->ref);
1717 mutex_unlock(&vgpu->cache_lock);
1722 static void __gvt_dma_release(struct kref *ref)
1724 struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
1726 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1728 __gvt_cache_remove_entry(entry->vgpu, entry);
1731 void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
1732 dma_addr_t dma_addr)
1734 struct gvt_dma *entry;
1736 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1739 mutex_lock(&vgpu->cache_lock);
1740 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
1742 kref_put(&entry->ref, __gvt_dma_release);
1743 mutex_unlock(&vgpu->cache_lock);
1746 static void init_device_info(struct intel_gvt *gvt)
1748 struct intel_gvt_device_info *info = &gvt->device_info;
1749 struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
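/* Mostly fixed constants describing the device model; only the MSI
 * capability offset below comes from the physical device. */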
1751 info->max_support_vgpus = 8;
1752 info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
1753 info->mmio_size = 2 * 1024 * 1024;
1755 info->gtt_start_offset = 8 * 1024 * 1024;
1756 info->gtt_entry_size = 8;
1757 info->gtt_entry_size_shift = 3;
1758 info->gmadr_bytes_in_cmd = 8;
1759 info->max_surface_size = 36 * 1024 * 1024;
1760 info->msi_cap_offset = pdev->msi_cap;
1763 static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
1765 struct intel_vgpu *vgpu;
1768 mutex_lock(&gvt->lock);
1769 idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
1770 if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
1771 (void *)&gvt->service_request)) {
1772 if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
1773 intel_vgpu_emulate_vblank(vgpu);
1776 mutex_unlock(&gvt->lock);
1779 static int gvt_service_thread(void *data)
1781 struct intel_gvt *gvt = (struct intel_gvt *)data;
1784 gvt_dbg_core("service thread start\n");
1786 while (!kthread_should_stop()) {
1787 ret = wait_event_interruptible(gvt->service_thread_wq,
1788 kthread_should_stop() || gvt->service_request);
1790 if (kthread_should_stop())
1793 if (WARN_ONCE(ret, "service thread is woken up by a signal.\n"))
1796 intel_gvt_test_and_emulate_vblank(gvt);
1798 if (test_bit(INTEL_GVT_REQUEST_SCHED,
1799 (void *)&gvt->service_request) ||
1800 test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
1801 (void *)&gvt->service_request)) {
1802 intel_gvt_schedule(gvt);
1809 static void clean_service_thread(struct intel_gvt *gvt)
1811 kthread_stop(gvt->service_thread);
1814 static int init_service_thread(struct intel_gvt *gvt)
1816 init_waitqueue_head(&gvt->service_thread_wq);
1818 gvt->service_thread = kthread_run(gvt_service_thread,
1819 gvt, "gvt_service_thread");
1820 if (IS_ERR(gvt->service_thread)) {
1821 gvt_err("failed to start service thread.\n");
1822 return PTR_ERR(gvt->service_thread);
1828 * intel_gvt_clean_device - clean a GVT device
1829 * @i915: i915 private
1831 * This function is called at the driver unloading stage, to free the
1832 * resources owned by a GVT device.
1835 static void intel_gvt_clean_device(struct drm_i915_private *i915)
1837 struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
1839 if (drm_WARN_ON(&i915->drm, !gvt))
1842 mdev_unregister_parent(&gvt->parent);
1843 intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
1844 intel_gvt_clean_vgpu_types(gvt);
1846 intel_gvt_debugfs_clean(gvt);
1847 clean_service_thread(gvt);
1848 intel_gvt_clean_cmd_parser(gvt);
1849 intel_gvt_clean_sched_policy(gvt);
1850 intel_gvt_clean_workload_scheduler(gvt);
1851 intel_gvt_clean_gtt(gvt);
1852 intel_gvt_free_firmware(gvt);
1853 intel_gvt_clean_mmio_info(gvt);
1854 idr_destroy(&gvt->vgpu_idr);
1860 * intel_gvt_init_device - initialize a GVT device
1861 * @i915: drm i915 private data
1863 * This function is called at the initialization stage, to initialize
1864 * necessary GVT components.
1867 * Zero on success, negative error code if failed.
1870 static int intel_gvt_init_device(struct drm_i915_private *i915)
1872 struct intel_gvt *gvt;
1873 struct intel_vgpu *vgpu;
1876 if (drm_WARN_ON(&i915->drm, i915->gvt))
1879 gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
1883 gvt_dbg_core("init gvt device\n");
1885 idr_init_base(&gvt->vgpu_idr, 1);
1886 spin_lock_init(&gvt->scheduler.mmio_context_lock);
1887 mutex_init(&gvt->lock);
1888 mutex_init(&gvt->sched_lock);
1889 gvt->gt = to_gt(i915);
1892 init_device_info(gvt);
1894 ret = intel_gvt_setup_mmio_info(gvt);
1898 intel_gvt_init_engine_mmio_context(gvt);
1900 ret = intel_gvt_load_firmware(gvt);
1902 goto out_clean_mmio_info;
1904 ret = intel_gvt_init_irq(gvt);
1906 goto out_free_firmware;
1908 ret = intel_gvt_init_gtt(gvt);
1910 goto out_free_firmware;
1912 ret = intel_gvt_init_workload_scheduler(gvt);
1916 ret = intel_gvt_init_sched_policy(gvt);
1918 goto out_clean_workload_scheduler;
1920 ret = intel_gvt_init_cmd_parser(gvt);
1922 goto out_clean_sched_policy;
1924 ret = init_service_thread(gvt);
1926 goto out_clean_cmd_parser;
1928 ret = intel_gvt_init_vgpu_types(gvt);
1930 goto out_clean_thread;
1932 vgpu = intel_gvt_create_idle_vgpu(gvt);
1934 ret = PTR_ERR(vgpu);
1935 gvt_err("failed to create idle vgpu\n");
1936 goto out_clean_types;
1938 gvt->idle_vgpu = vgpu;
1940 intel_gvt_debugfs_init(gvt);
1942 ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
1943 &intel_vgpu_mdev_driver,
1944 gvt->mdev_types, gvt->num_types);
1946 goto out_destroy_idle_vgpu;
1948 gvt_dbg_core("gvt device initialization is done\n");
1951 out_destroy_idle_vgpu:
1952 intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
1953 intel_gvt_debugfs_clean(gvt);
1955 intel_gvt_clean_vgpu_types(gvt);
1957 clean_service_thread(gvt);
1958 out_clean_cmd_parser:
1959 intel_gvt_clean_cmd_parser(gvt);
1960 out_clean_sched_policy:
1961 intel_gvt_clean_sched_policy(gvt);
1962 out_clean_workload_scheduler:
1963 intel_gvt_clean_workload_scheduler(gvt);
1965 intel_gvt_clean_gtt(gvt);
1967 intel_gvt_free_firmware(gvt);
1968 out_clean_mmio_info:
1969 intel_gvt_clean_mmio_info(gvt);
1971 idr_destroy(&gvt->vgpu_idr);
1977 static void intel_gvt_pm_resume(struct drm_i915_private *i915)
1979 struct intel_gvt *gvt = i915->gvt;
1981 intel_gvt_restore_fence(gvt);
1982 intel_gvt_restore_mmio(gvt);
1983 intel_gvt_restore_ggtt(gvt);
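/*
 * These ops are handed to the i915 core via intel_gvt_set_ops() in
 * kvmgt_init() below, letting the driver bring GVT up and down and restore
 * fence registers, vGPU MMIO and GGTT contents on resume from host suspend.
 */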
1986 static const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
1987 .init_device = intel_gvt_init_device,
1988 .clean_device = intel_gvt_clean_device,
1989 .pm_resume = intel_gvt_pm_resume,
1992 static int __init kvmgt_init(void)
1996 ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops);
2000 ret = mdev_register_driver(&intel_vgpu_mdev_driver);
2002 intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
2006 static void __exit kvmgt_exit(void)
2008 mdev_unregister_driver(&intel_vgpu_mdev_driver);
2009 intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
2012 module_init(kvmgt_init);
2013 module_exit(kvmgt_exit);
2015 MODULE_LICENSE("GPL and additional rights");
2016 MODULE_AUTHOR("Intel Corporation");