Merge drm-intel-next-queued into gvt-next
author Zhenyu Wang <[email protected]>
Thu, 20 Feb 2020 08:23:37 +0000 (16:23 +0800)
committer Zhenyu Wang <[email protected]>
Thu, 20 Feb 2020 08:23:37 +0000 (16:23 +0800)
Backmerge to pull in
https://patchwork.freedesktop.org/patch/353621/?series=73544&rev=1

Signed-off-by: Zhenyu Wang <[email protected]>
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/vgpu.c

diff --combined drivers/gpu/drm/i915/gvt/kvmgt.c
index 4177e9e8179fe05a899ed299ef0a23d561ad7686,3259a1fa69e105a2c91bca299dcec8d063bdfdc0..85e59c502ab50552cdcb4901fe2f9fdde18ac59e
@@@ -1597,10 -1597,12 +1597,10 @@@ static struct mdev_parent_ops intel_vgp
  
  static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
  {
 -      struct attribute **kvm_type_attrs;
        struct attribute_group **kvm_vgpu_type_groups;
  
        intel_gvt_ops = ops;
 -      if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
 -                      &kvm_vgpu_type_groups))
 +      if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
                return -EFAULT;
        intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
  
@@@ -1767,6 -1769,8 +1767,6 @@@ static int kvmgt_guest_init(struct mdev
        kvmgt_protect_table_init(info);
        gvt_cache_init(vgpu);
  
 -      init_completion(&vgpu->vblank_done);
 -
        info->track_node.track_write = kvmgt_page_track_write;
        info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_page_track_register_notifier(kvm, &info->track_node);
@@@ -1912,6 -1916,28 +1912,28 @@@ err_unlock
        return ret;
  }
  
+ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+ {
+       struct kvmgt_guest_info *info;
+       struct gvt_dma *entry;
+       int ret = 0;
+
+       if (!handle_valid(handle))
+               return -ENODEV;
+
+       info = (struct kvmgt_guest_info *)handle;
+
+       mutex_lock(&info->vgpu->vdev.cache_lock);
+       entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+       if (entry)
+               kref_get(&entry->ref);
+       else
+               ret = -ENOMEM;
+       mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+       return ret;
+ }
+
  static void __gvt_dma_release(struct kref *ref)
  {
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@@ -2023,6 -2049,7 +2045,7 @@@ static struct intel_gvt_mpt kvmgt_mpt 
        .gfn_to_mfn = kvmgt_gfn_to_pfn,
        .dma_map_guest_page = kvmgt_dma_map_guest_page,
        .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+       .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
        .set_opregion = kvmgt_set_opregion,
        .set_edid = kvmgt_set_edid,
        .get_vfio_device = kvmgt_get_vfio_device,
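
For context: the pulled-in patch adds the dma_pin_guest_page MPT hook wired into the kvmgt_mpt table above. Below is a minimal illustrative sketch, not part of this merge, of how GVT-g code could reach that hook through the usual intel_gvt_hypervisor_* wrapper convention; the wrapper body and the caller name here are assumptions for illustration only.

/*
 * Illustrative sketch only, not part of this merge. The wrapper follows the
 * intel_gvt_hypervisor_* convention used elsewhere in GVT-g and dispatches
 * to the registered backend, e.g. kvmgt_dma_pin_guest_page() above.
 */
static inline int intel_gvt_hypervisor_dma_pin_guest_page(
		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
}

/* Hypothetical caller: take an extra reference on an already-mapped guest page. */
static int example_pin_guest_dma(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
	int ret = intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr);

	if (ret)
		gvt_vgpu_err("failed to pin dma address %pad\n", &dma_addr);
	return ret;
}
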
diff --combined drivers/gpu/drm/i915/gvt/vgpu.c
index 79107e630049569b7da72574bd5d387d9d57b12d,85bd9bf4f6eee58b3c00e567a7c5286a017dccb9..1e086590513685abf9aeca7592ad08cba28e7f0a
@@@ -212,9 -212,9 +212,9 @@@ static void intel_gvt_update_vgpu_types
   */
  void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  {
-       mutex_lock(&vgpu->gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        vgpu->active = true;
-       mutex_unlock(&vgpu->gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
  }
  
  /**
@@@ -426,7 -426,9 +426,7 @@@ static struct intel_vgpu *__intel_gvt_c
        if (ret)
                goto out_clean_sched_policy;
  
 -      /*TODO: add more platforms support */
 -      if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
 -              ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 +      ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
        if (ret)
                goto out_clean_sched_policy;
  