Git Repo - J-linux.git/commitdiff
Merge branch 'drm-next-4.21' of git://people.freedesktop.org/~agd5f/linux into drm...
author Dave Airlie <[email protected]>
Mon, 19 Nov 2018 01:07:52 +0000 (11:07 +1000)
committer Dave Airlie <[email protected]>
Mon, 19 Nov 2018 01:07:52 +0000 (11:07 +1000)
New features for 4.21:
amdgpu:
- Support for SDMA paging queue on vega
- Put compute EOP buffers into vram for better performance
- Share more code with amdkfd
- Support for scanout with DCC on gfx9
- Initial kerneldoc for DC
- Updated SMU firmware support for gfx8 chips
- Rework CSA handling for eventual support for preemption
- XGMI PSP support
- Clean up RLC handling
- Enable GPU reset by default on VI, SOC15 dGPUs
- Ring and IB test cleanups

amdkfd:
- Share more code with amdgpu

ttm:
- Move global init out of the drivers

scheduler:
- Track if schedulers are ready for work
- Timeout/fault handling changes to facilitate GPU recovery

Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
16 files changed:
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/bochs/bochs.h
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ttm.c
include/drm/drmP.h

diff --combined drivers/gpu/drm/Makefile
index 576ba985e138395e15258b40ef99edf9ef045f9e,32a837b72765dbf07ac79781cbd70598c66b5005..7f3be3506057af368c84a6584b298930166c49c3
@@@ -11,7 -11,7 +11,7 @@@ drm-y       :=        drm_auth.o drm_bufs.o dr
                drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
                drm_info.o drm_encoder_slave.o \
-               drm_trace_points.o drm_global.o drm_prime.o \
+               drm_trace_points.o drm_prime.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
                drm_modeset_lock.o drm_atomic.o drm_bridge.o \
                drm_framebuffer.o drm_connector.o drm_blend.o \
@@@ -36,8 -36,7 +36,8 @@@ drm_kms_helper-y := drm_crtc_helper.o d
                drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
                drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
                drm_simple_kms_helper.o drm_modeset_helper.o \
 -              drm_scdc_helper.o drm_gem_framebuffer_helper.o
 +              drm_scdc_helper.o drm_gem_framebuffer_helper.o \
 +              drm_atomic_state_helper.o
  
  drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
  drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
index 35bc8fc3bc701d965291bda7435835175982c5e3,fd12e9162f3c79aca16c5f89ecfb0e972313f7bd..024dfbd87f118beaa4384711fc986a579668a86a
@@@ -955,7 -955,7 +955,7 @@@ static int amdgpu_cs_vm_handling(struc
        if (r)
                return r;
  
 -      r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
 +      r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
        if (r)
                return r;
  
@@@ -1104,7 -1104,7 +1104,7 @@@ static int amdgpu_syncobj_lookup_and_ad
  {
        int r;
        struct dma_fence *fence;
 -      r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
 +      r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
        if (r)
                return r;
  
@@@ -1260,8 -1260,7 +1260,7 @@@ static int amdgpu_cs_submit(struct amdg
        return 0;
  
  error_abort:
-       dma_fence_put(&job->base.s_fence->finished);
-       job->base.s_fence = NULL;
+       drm_sched_job_cleanup(&job->base);
        amdgpu_mn_unlock(p->mn);
  
  error_unlock:
@@@ -1285,7 -1284,7 +1284,7 @@@ int amdgpu_cs_ioctl(struct drm_device *
  
        r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
-               DRM_ERROR("Failed to initialize parser !\n");
+               DRM_ERROR("Failed to initialize parser %d!\n", r);
                goto out;
        }
  
index bfa317ad20a956017273a7c1fe7ca2decd6491e1,6a48cad0fb19c85aa7cd08875aed9ea6b6e27ff2..f5edddf3b29d5310ce4e6474d1a04be5e248adbb
@@@ -35,20 -35,25 +35,25 @@@ u64 gfxhub_v1_0_get_mc_fb_offset(struc
        return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
  }
  
- static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base)
  {
-       uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+       /* two registers distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+       int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+                       - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
  
-       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-                    lower_32_bits(value));
+       WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+                               offset * vmid, lower_32_bits(page_table_base));
  
-       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-                    upper_32_bits(value));
+       WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+                               offset * vmid, upper_32_bits(page_table_base));
  }
  
  static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
  {
-       gfxhub_v1_0_init_gart_pt_regs(adev);
+       uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+       gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
  
        WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
@@@ -72,7 -77,7 +77,7 @@@ static void gfxhub_v1_0_init_system_ape
  
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 -                   min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
 +                   min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
  
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 -                           max((adev->gmc.vram_end >> 18) + 0x1,
 +                           max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 -                           max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
 +                           max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
  
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
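
The gfxhub hunk above (and the matching mmhub change that follows) derives a per-VMID register stride from the distance between the VM_CONTEXT0 and VM_CONTEXT1 page-table base registers, then splits the 64-bit page table base across a LO32/HI32 register pair. The standalone sketch below shows just that arithmetic; the register index values are made up purely for illustration, and the MMIO write is replaced by a printf.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical register indices, for illustration only; the real values
 * come from the ASIC register headers. */
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x054d
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x054e
#define mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x054f

/* Stand-in for an MMIO register write; just prints what would be written. */
static void wreg32(uint32_t reg, uint32_t val)
{
	printf("WREG32(0x%04x) = 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void setup_vm_pt_regs(uint32_t vmid, uint64_t page_table_base)
{
	/* distance between the VM_CONTEXT0_* and VM_CONTEXT1_* register blocks */
	uint32_t offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	wreg32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 + offset * vmid,
	       (uint32_t)(page_table_base & 0xffffffffu));	/* lower_32_bits() */
	wreg32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 + offset * vmid,
	       (uint32_t)(page_table_base >> 32));		/* upper_32_bits() */
}

int main(void)
{
	setup_vm_pt_regs(0, 0x123456000ULL);	/* vmid 0: the GART page table */
	setup_vm_pt_regs(3, 0x456789000ULL);	/* any other vmid, same helper */
	return 0;
}
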
index a0db67adc34cee3d1ee13ca97d8b333ff36dfdc6,3881a42e780c5a54f7064d62a42e4c14581a5fb9..d0d966d6080a6dda87d57d2d8ee1ed2b58a1444a
@@@ -52,20 -52,25 +52,25 @@@ u64 mmhub_v1_0_get_fb_location(struct a
        return base;
  }
  
- static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+                               uint64_t page_table_base)
  {
-       uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+       /* two registers distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+       int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+                       - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
  
-       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-                    lower_32_bits(value));
+       WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+                       offset * vmid, lower_32_bits(page_table_base));
  
-       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-                    upper_32_bits(value));
+       WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+                       offset * vmid, upper_32_bits(page_table_base));
  }
  
  static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
  {
-       mmhub_v1_0_init_gart_pt_regs(adev);
+       uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+       mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
  
        WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
@@@ -90,7 -95,7 +95,7 @@@ static void mmhub_v1_0_init_system_aper
  
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 -                   min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
 +                   min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
  
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 -                           max((adev->gmc.vram_end >> 18) + 0x1,
 +                           max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 -                           max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
 +                           max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
  
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
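
Both hub hunks above widen the system aperture from the VRAM range to the full framebuffer plus AGP range: the low register is programmed with min(fb_start, agp_start) and the high register with max(fb_end, agp_end), each shifted right by 18 bits (256 KiB units). A tiny standalone check of that arithmetic, using made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical GPU address ranges, for illustration only. */
	uint64_t fb_start  = 0x0000008000000000ULL;
	uint64_t fb_end    = 0x00000080ffffffffULL;
	uint64_t agp_start = 0x0000008400000000ULL;
	uint64_t agp_end   = 0x00000084ffffffffULL;

	/* >> 18 converts byte addresses into 256 KiB logical page numbers. */
	uint64_t aperture_low  = MIN(fb_start, agp_start) >> 18;
	uint64_t aperture_high = MAX(fb_end, agp_end) >> 18;

	printf("APERTURE_LOW_ADDR  = 0x%llx\n", (unsigned long long)aperture_low);
	printf("APERTURE_HIGH_ADDR = 0x%llx\n", (unsigned long long)aperture_high);
	return 0;
}
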
index 5064768642f32ea12a84742e61e02e8e9b5c02ff,0e4e5f9e2219be488b53dc0576acba44eebd7eba..aa43bb253ea28c3177b610a9850fd3e3ac5a7fc5
  #define FIRMWARE_RAVEN_DMCU           "amdgpu/raven_dmcu.bin"
  MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
  
+ /**
+  * DOC: overview
+  *
+  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+  * requests into DC requests, and DC responses into DRM responses.
+  *
+  * The root control structure is &struct amdgpu_display_manager.
+  */
  /* basic init/fini API */
  static int amdgpu_dm_init(struct amdgpu_device *adev);
  static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@@ -95,7 -105,7 +105,7 @@@ static voi
  amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
  
  static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-                               struct amdgpu_plane *aplane,
+                               struct drm_plane *plane,
                                unsigned long possible_crtcs);
  static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
@@@ -379,11 -389,6 +389,6 @@@ static void amdgpu_dm_fbc_init(struct d
  
  }
  
- /*
-  * Init display KMS
-  *
-  * Returns 0 on success
-  */
  static int amdgpu_dm_init(struct amdgpu_device *adev)
  {
        struct dc_init_data init_data;
@@@ -663,6 -668,26 +668,26 @@@ static void s3_handle_mst(struct drm_de
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
  }
  
+ /**
+  * dm_hw_init() - Initialize DC device
+  * @handle: The base driver device containing the amdgpu_dm device.
+  *
+  * Initialize the &struct amdgpu_display_manager device. This involves calling
+  * the initializers of each DM component, then populating the struct with them.
+  *
+  * Although the function implies hardware initialization, both hardware and
+  * software are initialized here. Splitting them out to their relevant init
+  * hooks is a future TODO item.
+  *
+  * Some notable things that are initialized here:
+  *
+  * - Display Core, both software and hardware
+  * - DC modules that we need (freesync and color management)
+  * - DRM software states
+  * - Interrupt sources and handlers
+  * - Vblank support
+  * - Debug FS entries, if enabled
+  */
  static int dm_hw_init(void *handle)
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        return 0;
  }
  
+ /**
+  * dm_hw_fini() - Teardown DC device
+  * @handle: The base driver device containing the amdgpu_dm device.
+  *
+  * Tear down components within &struct amdgpu_display_manager that require
+  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+  * were loaded. Also flush IRQ workqueues and disable them.
+  */
  static int dm_hw_fini(void *handle)
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@@ -898,6 -931,16 +931,16 @@@ static int dm_resume(void *handle
        return ret;
  }
  
+ /**
+  * DOC: DM Lifecycle
+  *
+  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+  * the base driver's device list to be initialized and torn down accordingly.
+  *
+  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+  */
  static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
@@@ -965,6 -1008,12 +1008,12 @@@ dm_atomic_state_alloc_free(struct drm_a
        kfree(dm_state);
  }
  
+ /**
+  * DOC: atomic
+  *
+  * *WIP*
+  */
  static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
@@@ -1527,8 -1576,23 +1576,23 @@@ static int amdgpu_dm_backlight_update_s
  {
        struct amdgpu_display_manager *dm = bl_get_data(bd);
  
+       /* The backlight_pwm_u16_16 parameter is an unsigned 32-bit value:
+        * 16 integer bits and 16 fractional bits, where 1.0 is the maximum
+        * backlight value. bd->props.brightness is an 8-bit value and is
+        * scaled by copying its lower byte into the upper byte of the 16-bit value.
+        */
+       uint32_t brightness = bd->props.brightness * 0x101;
+       /*
+        * The PWM interprets 0 as 100% rather than 0% because of a HW
+        * limitation for level 0, so limit the minimum brightness level
+        * to 1.
+        */
+       if (bd->props.brightness < 1)
+               brightness = 0x101;
        if (dc_link_set_backlight_level(dm->backlight_link,
-                       bd->props.brightness, 0, 0))
+                       brightness, 0, 0))
                return 0;
        else
                return 1;
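
The backlight hunk above scales the 8-bit bd->props.brightness into the 16.16 fixed-point value that dc_link_set_backlight_level() now receives by multiplying by 0x101, which copies the byte into both halves of the 16-bit integer range (255 * 0x101 = 0xFFFF), and clamps level 0 up to level 1 to work around the PWM quirk described in the comment. A standalone sketch of just that conversion; the helper name is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the conversion performed in the hunk above. */
static uint32_t brightness_to_pwm_u16_16(uint8_t props_brightness)
{
	/* 0x00..0xff scaled so 0xff maps to 0xffff, the largest value the
	 * 8-bit input can express in the 16-bit range. */
	uint32_t brightness = (uint32_t)props_brightness * 0x101;

	/* The PWM hardware treats 0 as 100%, so clamp the minimum to level 1. */
	if (props_brightness < 1)
		brightness = 0x101;

	return brightness;
}

int main(void)
{
	printf("%3u -> 0x%04x\n",   0u, (unsigned)brightness_to_pwm_u16_16(0));	/* 0x0101 */
	printf("%3u -> 0x%04x\n", 128u, (unsigned)brightness_to_pwm_u16_16(128));	/* 0x8080 */
	printf("%3u -> 0x%04x\n", 255u, (unsigned)brightness_to_pwm_u16_16(255));	/* 0xffff */
	return 0;
}
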
@@@ -1580,18 -1644,18 +1644,18 @@@ static int initialize_plane(struct amdg
                             struct amdgpu_mode_info *mode_info,
                             int plane_id)
  {
-       struct amdgpu_plane *plane;
+       struct drm_plane *plane;
        unsigned long possible_crtcs;
        int ret = 0;
  
-       plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+       plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
        mode_info->planes[plane_id] = plane;
  
        if (!plane) {
                DRM_ERROR("KMS: Failed to allocate plane\n");
                return -ENOMEM;
        }
-       plane->base.type = mode_info->plane_type[plane_id];
+       plane->type = mode_info->plane_type[plane_id];
  
        /*
         * HACK: IGT tests expect that each plane can only have
@@@ -1682,7 -1746,7 +1746,7 @@@ static int amdgpu_dm_initialize_drm_dev
        }
  
        for (i = 0; i < dm->dc->caps.max_streams; i++)
-               if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+               if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }
@@@ -3185,6 -3249,7 +3249,6 @@@ amdgpu_dm_connector_helper_funcs = 
         */
        .get_modes = get_modes,
        .mode_valid = amdgpu_dm_connector_mode_valid,
 -      .best_encoder = drm_atomic_helper_best_encoder
  };
  
  static void dm_crtc_helper_disable(struct drm_crtc *crtc)
@@@ -3457,49 -3522,49 +3521,49 @@@ static const u32 cursor_formats[] = 
  };
  
  static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
-                               struct amdgpu_plane *aplane,
+                               struct drm_plane *plane,
                                unsigned long possible_crtcs)
  {
        int res = -EPERM;
  
-       switch (aplane->base.type) {
+       switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                rgb_formats,
                                ARRAY_SIZE(rgb_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        case DRM_PLANE_TYPE_OVERLAY:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                yuv_formats,
                                ARRAY_SIZE(yuv_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        case DRM_PLANE_TYPE_CURSOR:
                res = drm_universal_plane_init(
                                dm->adev->ddev,
-                               &aplane->base,
+                               plane,
                                possible_crtcs,
                                &dm_plane_funcs,
                                cursor_formats,
                                ARRAY_SIZE(cursor_formats),
-                               NULL, aplane->base.type, NULL);
+                               NULL, plane->type, NULL);
                break;
        }
  
-       drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+       drm_plane_helper_add(plane, &dm_plane_helper_funcs);
  
        /* Create (reset) the plane state */
-       if (aplane->base.funcs->reset)
-               aplane->base.funcs->reset(&aplane->base);
+       if (plane->funcs->reset)
+               plane->funcs->reset(plane);
  
  
        return res;
@@@ -3510,7 -3575,7 +3574,7 @@@ static int amdgpu_dm_crtc_init(struct a
                               uint32_t crtc_index)
  {
        struct amdgpu_crtc *acrtc = NULL;
-       struct amdgpu_plane *cursor_plane;
+       struct drm_plane *cursor_plane;
  
        int res = -ENOMEM;
  
        if (!cursor_plane)
                goto fail;
  
-       cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+       cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
  
        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
                        dm->ddev,
                        &acrtc->base,
                        plane,
-                       &cursor_plane->base,
+                       cursor_plane,
                        &amdgpu_dm_crtc_funcs, NULL);
  
        if (res)
@@@ -3587,17 -3652,14 +3651,17 @@@ static int to_drm_connector_type(enum s
        }
  }
  
 +static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
 +{
 +      return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
 +}
 +
  static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
  {
 -      const struct drm_connector_helper_funcs *helper =
 -              connector->helper_private;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;
  
 -      encoder = helper->best_encoder(connector);
 +      encoder = amdgpu_dm_connector_to_encoder(connector);
  
        if (encoder == NULL)
                return;
@@@ -3724,12 -3786,14 +3788,12 @@@ static void amdgpu_dm_connector_ddc_get
  
  static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
  {
 -      const struct drm_connector_helper_funcs *helper =
 -                      connector->helper_private;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct drm_encoder *encoder;
        struct edid *edid = amdgpu_dm_connector->edid;
  
 -      encoder = helper->best_encoder(connector);
 +      encoder = amdgpu_dm_connector_to_encoder(connector);
  
        if (!edid || !drm_edid_is_valid(edid)) {
                amdgpu_dm_connector->num_modes =
@@@ -3768,12 -3832,12 +3832,12 @@@ void amdgpu_dm_connector_init_helper(st
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
-                       link->link_enc->features.ycbcr420_supported ? true : false;
+                       link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
-                       link->link_enc->features.ycbcr420_supported ? true : false;
+                       link->link_enc->features.dp_ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@@ -4531,6 -4595,14 +4595,14 @@@ static int amdgpu_dm_atomic_commit(stru
        /*TODO Handle EINTR, reenable IRQ*/
  }
  
+ /**
+  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+  * @state: The atomic state to commit
+  *
+  * This will tell DC to commit the constructed DC state from atomic_check,
+  * programming the hardware. Any failure here implies a hardware failure, since
+  * atomic check should have filtered anything non-kosher.
+  */
  static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
  {
        struct drm_device *dev = state->dev;
@@@ -5302,6 -5374,12 +5374,12 @@@ enum surface_update_type dm_determine_u
        struct dc_stream_update stream_update;
        enum surface_update_type update_type = UPDATE_TYPE_FAST;
  
+       if (!updates || !surface) {
+               DRM_ERROR("Plane or surface update failed to allocate");
+               /* Set type to FULL to avoid crashing in DC*/
+               update_type = UPDATE_TYPE_FULL;
+               goto ret;
+       }
  
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
        return update_type;
  }
  
+ /**
+  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+  * @dev: The DRM device
+  * @state: The atomic state to commit
+  *
+  * Validate that the given atomic state is programmable by DC into hardware.
+  * This involves constructing a &struct dc_state reflecting the new hardware
+  * state we wish to commit, then querying DC to see if it is programmable. It's
+  * important not to modify the existing DC state. Otherwise, atomic_check
+  * may unexpectedly commit hardware changes.
+  *
+  * When validating the DC state, it's important that the right locks are
+  * acquired. For the full-update case, which removes/adds/updates streams on
+  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
+  * that any such full-update commit will wait for completion of any outstanding
+  * flip, using DRM's synchronization events. See
+  * dm_determine_update_type_for_commit().
+  *
+  * Note that DM adds the affected connectors for all CRTCs in state, when that
+  * might not seem necessary. This is because DC stream creation requires the
+  * DC sink, which is tied to the DRM connector state. Cleaning this up should
+  * be possible but non-trivial - a possible TODO item.
+  *
+  * Return: -Error code if validation failed.
+  */
  static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state)
  {
                lock_and_validation_needed = true;
        }
  
-       /*
-        * For full updates case when
-        * removing/adding/updating streams on one CRTC while flipping
-        * on another CRTC,
-        * acquiring global lock  will guarantee that any such full
-        * update commit
-        * will wait for completion of any outstanding flip using DRMs
-        * synchronization events.
-        */
        update_type = dm_determine_update_type_for_commit(dc, state);
  
        if (overall_update_type < update_type)
index 577a8b917cb912e2884cc13431334187049fda73,75e4cf6cda5db98e50cf454c3c8364d3d32e1274..fb38c8b857b5a26a7e40d2469d00fe3ce02546ed
@@@ -66,7 -66,6 +66,7 @@@ struct bochs_device 
        u16 yres_virtual;
        u32 stride;
        u32 bpp;
 +      struct edid *edid;
  
        /* drm */
        struct drm_device  *dev;
@@@ -77,8 -76,6 +77,6 @@@
  
        /* ttm */
        struct {
-               struct drm_global_reference mem_global_ref;
-               struct ttm_bo_global_ref bo_global_ref;
                struct ttm_bo_device bdev;
                bool initialized;
        } ttm;
@@@ -127,7 -124,6 +125,7 @@@ void bochs_hw_setmode(struct bochs_devi
                      const struct drm_format_info *format);
  void bochs_hw_setbase(struct bochs_device *bochs,
                      int x, int y, u64 addr);
 +int bochs_hw_load_edid(struct bochs_device *bochs);
  
  /* bochs_mm.c */
  int bochs_mm_init(struct bochs_device *bochs);
index e6ccf7fa92d4997bf408dd425fc57ae41cc2a456,3bd773ef78dd72a3b2a2017fef03d907b8a35235..0980411e41bf0d7d61fa173b4199253795aa2966
@@@ -16,61 -16,6 +16,6 @@@ static inline struct bochs_device *boch
        return container_of(bd, struct bochs_device, ttm.bdev);
  }
  
- static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
- {
-       return ttm_mem_global_init(ref->object);
- }
- static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
- {
-       ttm_mem_global_release(ref->object);
- }
- static int bochs_ttm_global_init(struct bochs_device *bochs)
- {
-       struct drm_global_reference *global_ref;
-       int r;
-       global_ref = &bochs->ttm.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &bochs_ttm_mem_global_init;
-       global_ref->release = &bochs_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-       bochs->ttm.bo_global_ref.mem_glob =
-               bochs->ttm.mem_global_ref.object;
-       global_ref = &bochs->ttm.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&bochs->ttm.mem_global_ref);
-               return r;
-       }
-       return 0;
- }
- static void bochs_ttm_global_release(struct bochs_device *bochs)
- {
-       if (bochs->ttm.mem_global_ref.release == NULL)
-               return;
-       drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
-       drm_global_item_unref(&bochs->ttm.mem_global_ref);
-       bochs->ttm.mem_global_ref.release = NULL;
- }
  static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
  {
        struct bochs_bo *bo;
@@@ -208,12 -153,7 +153,7 @@@ int bochs_mm_init(struct bochs_device *
        struct ttm_bo_device *bdev = &bochs->ttm.bdev;
        int ret;
  
-       ret = bochs_ttm_global_init(bochs);
-       if (ret)
-               return ret;
        ret = ttm_bo_device_init(&bochs->ttm.bdev,
-                                bochs->ttm.bo_global_ref.ref.object,
                                 &bochs_bo_driver,
                                 bochs->dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
@@@ -240,7 -180,6 +180,6 @@@ void bochs_mm_fini(struct bochs_device 
                return;
  
        ttm_bo_device_release(&bochs->ttm.bdev);
-       bochs_ttm_global_release(bochs);
        bochs->ttm.initialized = false;
  }
  
@@@ -414,7 -353,7 +353,7 @@@ int bochs_dumb_create(struct drm_file *
                return ret;
  
        ret = drm_gem_handle_create(file, gobj, &handle);
 -      drm_gem_object_unreference_unlocked(gobj);
 +      drm_gem_object_put_unlocked(gobj);
        if (ret)
                return ret;
  
@@@ -454,6 -393,6 +393,6 @@@ int bochs_dumb_mmap_offset(struct drm_f
        bo = gem_to_bochs_bo(obj);
        *offset = bochs_bo_mmap_offset(bo);
  
 -      drm_gem_object_unreference_unlocked(obj);
 +      drm_gem_object_put_unlocked(obj);
        return 0;
  }
index aa1cef794f9a95d84b0777cd743f3d96c2c1f0be,fc29f46b7c326cc14856f70fe01d7a247b781fd6..e2ffecd5e45379c80ed067c93b49215e78c25967
@@@ -476,6 -476,8 +476,6 @@@ static void drm_fs_inode_free(struct in
   * The initial ref-count of the object is 1. Use drm_dev_get() and
   * drm_dev_put() to take and drop further ref-counts.
   *
 - * Note that for purely virtual devices @parent can be NULL.
 - *
   * Drivers that do not want to allocate their own device struct
   * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
   * that do embed &struct drm_device it must be placed first in the overall
@@@ -500,8 -502,6 +500,8 @@@ int drm_dev_init(struct drm_device *dev
                return -ENODEV;
        }
  
 +      BUG_ON(!parent);
 +
        kref_init(&dev->ref);
        dev->dev = parent;
        dev->driver = driver;
                }
        }
  
 -      /* Use the parent device name as DRM device unique identifier, but fall
 -       * back to the driver name for virtual devices like vgem. */
 -      ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
 +      ret = drm_dev_set_unique(dev, dev_name(parent));
        if (ret)
                goto err_setunique;
  
@@@ -973,14 -975,12 +973,12 @@@ static void drm_core_exit(void
        drm_sysfs_destroy();
        idr_destroy(&drm_minors_idr);
        drm_connector_ida_destroy();
-       drm_global_release();
  }
  
  static int __init drm_core_init(void)
  {
        int ret;
  
-       drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);
  
index 9b476368aa313efd7c33945aeadd2b4c481c70d2,88b2768d91c9f10f44577bcd348e0589b526b551..49a6763693f1ab372d4eeea89f72cd04bbf63442
@@@ -93,7 -93,7 +93,7 @@@ static void etnaviv_sched_timedout_job(
        * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
 -      if (fence_completed(gpu, submit->out_fence->seqno))
 +      if (dma_fence_is_signaled(submit->out_fence))
                return;
  
        /*
        change = dma_addr - gpu->hangcheck_dma_addr;
        if (change < 0 || change > 16) {
                gpu->hangcheck_dma_addr = dma_addr;
-               schedule_delayed_work(&sched_job->sched->work_tdr,
-                                     sched_job->sched->timeout);
                return;
        }
  
@@@ -127,6 -125,8 +125,8 @@@ static void etnaviv_sched_free_job(stru
  {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
  
+       drm_sched_job_cleanup(sched_job);
        etnaviv_submit_put(submit);
  }
  
@@@ -159,6 -159,7 +159,7 @@@ int etnaviv_sched_push_job(struct drm_s
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        if (submit->out_fence_id < 0) {
+               drm_sched_job_cleanup(&submit->sched_job);
                ret = -ENOMEM;
                goto out_unlock;
        }
index 14d3fa855708f544d459b64b23c611e18e7d01cf,2310d6e9ff1f36b7e1cc5aff3ed6020bd91eed5a..13a0254b59a1a55fd09663dd32170b478b9513a3
@@@ -23,6 -23,7 +23,6 @@@
   *          Alon Levy
   */
  
 -
  #ifndef QXL_DRV_H
  #define QXL_DRV_H
  
@@@ -82,16 -83,16 +82,16 @@@ struct qxl_bo 
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
 -      unsigned                        pin_count;
 +      unsigned int pin_count;
        void                            *kptr;
        int                             type;
  
        /* Constant after initialization */
        struct drm_gem_object           gem_base;
 -      bool is_primary; /* is this now a primary surface */
 -      bool is_dumb;
 +      unsigned int is_primary:1; /* is this now a primary surface */
 +      unsigned int is_dumb:1;
        struct qxl_bo *shadow;
 -      bool hw_surf_alloc;
 +      unsigned int hw_surf_alloc:1;
        struct qxl_surface surf;
        uint32_t surface_id;
        struct qxl_release *surf_create;
@@@ -126,12 -127,10 +126,9 @@@ struct qxl_output 
  #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
  
  struct qxl_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
-       unsigned int mem_global_referenced:1;
        struct ttm_bo_device            bdev;
  };
  
 -
  struct qxl_memslot {
        uint8_t         generation;
        uint64_t        start_phys_addr;
@@@ -189,12 -188,12 +186,12 @@@ struct qxl_draw_fill 
   */
  struct qxl_debugfs {
        struct drm_info_list    *files;
 -      unsigned                num_files;
 +      unsigned int num_files;
  };
  
  int qxl_debugfs_add_files(struct qxl_device *rdev,
                             struct drm_info_list *files,
 -                           unsigned nfiles);
 +                           unsigned int nfiles);
  int qxl_debugfs_fence_init(struct qxl_device *rdev);
  
  struct qxl_device;
@@@ -229,7 -228,7 +226,7 @@@ struct qxl_device 
  
        struct qxl_ram_header *ram_header;
  
 -      bool primary_created;
 +      unsigned int primary_created:1;
  
        struct qxl_memslot      *mem_slots;
        uint8_t         n_mem_slots;
        atomic_t irq_received_display;
        atomic_t irq_received_cursor;
        atomic_t irq_received_io_cmd;
 -      unsigned irq_received_error;
 +      unsigned int irq_received_error;
        wait_queue_head_t display_event;
        wait_queue_head_t cursor_event;
        wait_queue_head_t io_cmd_event;
  
        /* debugfs */
        struct qxl_debugfs      debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
 -      unsigned                debugfs_count;
 +      unsigned int debugfs_count;
  
        struct mutex            update_area_mutex;
  
@@@ -370,6 -369,7 +367,6 @@@ int qxl_mode_dumb_mmap(struct drm_file 
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p);
  
 -
  /* qxl ttm */
  int qxl_ttm_init(struct qxl_device *qdev);
  void qxl_ttm_fini(struct qxl_device *qdev);
@@@ -395,7 -395,7 +392,7 @@@ void qxl_update_screen(struct qxl_devic
  /* qxl io operations (qxl_cmd.c) */
  
  void qxl_io_create_primary(struct qxl_device *qdev,
 -                         unsigned offset,
 +                         unsigned int offset,
                           struct qxl_bo *bo);
  void qxl_io_destroy_primary(struct qxl_device *qdev);
  void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@@ -446,9 -446,9 +443,9 @@@ void qxl_draw_opaque_fb(const struct qx
  void qxl_draw_dirty_fb(struct qxl_device *qdev,
                       struct drm_framebuffer *fb,
                       struct qxl_bo *bo,
 -                     unsigned flags, unsigned color,
 +                     unsigned int flags, unsigned int color,
                       struct drm_clip_rect *clips,
 -                     unsigned num_clips, int inc);
 +                     unsigned int num_clips, int inc);
  
  void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
  
@@@ -493,7 -493,7 +490,7 @@@ bool qxl_fbdev_qobj_is_fb(struct qxl_de
  
  int qxl_debugfs_add_files(struct qxl_device *qdev,
                          struct drm_info_list *files,
 -                        unsigned nfiles);
 +                        unsigned int nfiles);
  
  int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf);
index 559a101138379192fa2310bb178ba5643162d9f0,1468fddc19d023893a97c0373411597eb1211ee9..886f61e94f24470c37feaf224974fb28d43d2e5b
@@@ -46,62 -46,6 +46,6 @@@ static struct qxl_device *qxl_get_qdev(
        return qdev;
  }
  
- static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
- {
-       return ttm_mem_global_init(ref->object);
- }
- static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
- {
-       ttm_mem_global_release(ref->object);
- }
- static int qxl_ttm_global_init(struct qxl_device *qdev)
- {
-       struct drm_global_reference *global_ref;
-       int r;
-       qdev->mman.mem_global_referenced = false;
-       global_ref = &qdev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &qxl_ttm_mem_global_init;
-       global_ref->release = &qxl_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-       qdev->mman.bo_global_ref.mem_glob =
-               qdev->mman.mem_global_ref.object;
-       global_ref = &qdev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&qdev->mman.mem_global_ref);
-               return r;
-       }
-       qdev->mman.mem_global_referenced = true;
-       return 0;
- }
- static void qxl_ttm_global_fini(struct qxl_device *qdev)
- {
-       if (qdev->mman.mem_global_referenced) {
-               drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&qdev->mman.mem_global_ref);
-               qdev->mman.mem_global_referenced = false;
-       }
- }
  static struct vm_operations_struct qxl_ttm_vm_ops;
  static const struct vm_operations_struct *ttm_vm_ops;
  
@@@ -174,7 -118,7 +118,7 @@@ static int qxl_init_mem_type(struct ttm
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
 -              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
 +              DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }
        return 0;
@@@ -331,6 -275,7 +275,6 @@@ static int qxl_bo_move(struct ttm_buffe
        if (ret)
                return ret;
  
 -
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                qxl_move_null(bo, new_mem);
                return 0;
@@@ -372,12 -317,8 +316,8 @@@ int qxl_ttm_init(struct qxl_device *qde
        int r;
        int num_io_pages; /* != rom->num_io_pages, we include surface0 */
  
-       r = qxl_ttm_global_init(qdev);
-       if (r)
-               return r;
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&qdev->mman.bdev,
-                              qdev->mman.bo_global_ref.ref.object,
                               &qxl_bo_driver,
                               qdev->ddev.anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
                return r;
        }
        DRM_INFO("qxl: %uM of VRAM memory size\n",
 -               (unsigned)qdev->vram_size / (1024 * 1024));
 +               (unsigned int)qdev->vram_size / (1024 * 1024));
        DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
 -               ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
 +               ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
        DRM_INFO("qxl: %uM of Surface memory size\n",
 -               (unsigned)qdev->surfaceram_size / (1024 * 1024));
 +               (unsigned int)qdev->surfaceram_size / (1024 * 1024));
        return 0;
  }
  
@@@ -413,10 -354,10 +353,9 @@@ void qxl_ttm_fini(struct qxl_device *qd
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
        ttm_bo_device_release(&qdev->mman.bdev);
-       qxl_ttm_global_fini(qdev);
        DRM_INFO("qxl: ttm finalized\n");
  }
  
 -
  #define QXL_DEBUGFS_MEM_TYPES 2
  
  #if defined(CONFIG_DEBUG_FS)
@@@ -441,7 -382,7 +380,7 @@@ int qxl_ttm_debugfs_init(struct qxl_dev
  #if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
        static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
 -      unsigned i;
 +      unsigned int i;
  
        for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
index 83b4657ffb107aff31997fca9c2fb51836b1087c,01c6d14a005d4d14eae1afb35e1129a622f3cc27..d87935bf8e308f81e13ccde697fe35e86fa446c6
  
  static void ttm_bo_global_kobj_release(struct kobject *kobj);
  
+ /**
+  * ttm_global_mutex - protecting the global BO state
+  */
+ DEFINE_MUTEX(ttm_global_mutex);
+ struct ttm_bo_global ttm_bo_glob = {
+       .use_count = 0
+ };
  static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
@@@ -872,7 -880,7 +880,7 @@@ static int ttm_bo_add_move_fence(struc
        if (fence) {
                reservation_object_add_shared_fence(bo->resv, fence);
  
 -              ret = reservation_object_reserve_shared(bo->resv);
 +              ret = reservation_object_reserve_shared(bo->resv, 1);
                if (unlikely(ret))
                        return ret;
  
@@@ -977,7 -985,7 +985,7 @@@ int ttm_bo_mem_space(struct ttm_buffer_
        bool has_erestartsys = false;
        int i, ret;
  
 -      ret = reservation_object_reserve_shared(bo->resv);
 +      ret = reservation_object_reserve_shared(bo->resv, 1);
        if (unlikely(ret))
                return ret;
  
@@@ -1519,35 -1527,45 +1527,45 @@@ static void ttm_bo_global_kobj_release(
                container_of(kobj, struct ttm_bo_global, kobj);
  
        __free_page(glob->dummy_read_page);
-       kfree(glob);
  }
  
- void ttm_bo_global_release(struct drm_global_reference *ref)
+ static void ttm_bo_global_release(void)
  {
-       struct ttm_bo_global *glob = ref->object;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+       mutex_lock(&ttm_global_mutex);
+       if (--glob->use_count > 0)
+               goto out;
  
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
+       ttm_mem_global_release(&ttm_mem_glob);
+ out:
+       mutex_unlock(&ttm_global_mutex);
  }
- EXPORT_SYMBOL(ttm_bo_global_release);
  
- int ttm_bo_global_init(struct drm_global_reference *ref)
+ static int ttm_bo_global_init(void)
  {
-       struct ttm_bo_global_ref *bo_ref =
-               container_of(ref, struct ttm_bo_global_ref, ref);
-       struct ttm_bo_global *glob = ref->object;
-       int ret;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+       int ret = 0;
        unsigned i;
  
-       mutex_init(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
+       if (++glob->use_count > 1)
+               goto out;
+       ret = ttm_mem_global_init(&ttm_mem_glob);
+       if (ret)
+               goto out;
        spin_lock_init(&glob->lru_lock);
-       glob->mem_glob = bo_ref->mem_glob;
+       glob->mem_glob = &ttm_mem_glob;
        glob->mem_glob->bo_glob = glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
  
        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
-               goto out_no_drp;
+               goto out;
        }
  
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
-       return ret;
- out_no_drp:
-       kfree(glob);
+ out:
+       mutex_unlock(&ttm_global_mutex);
        return ret;
  }
- EXPORT_SYMBOL(ttm_bo_global_init);
  
  int ttm_bo_device_release(struct ttm_bo_device *bdev)
  {
                }
        }
  
-       mutex_lock(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
        list_del(&bdev->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       mutex_unlock(&ttm_global_mutex);
  
        cancel_delayed_work_sync(&bdev->wq);
  
  
        drm_vma_offset_manager_destroy(&bdev->vma_manager);
  
+       if (!ret)
+               ttm_bo_global_release();
        return ret;
  }
  EXPORT_SYMBOL(ttm_bo_device_release);
  
  int ttm_bo_device_init(struct ttm_bo_device *bdev,
-                      struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
                       uint64_t file_page_offset,
                       bool need_dma32)
  {
-       int ret = -EINVAL;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
+       int ret;
+       ret = ttm_bo_global_init();
+       if (ret)
+               return ret;
  
        bdev->driver = driver;
  
        bdev->dev_mapping = mapping;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
-       mutex_lock(&glob->device_list_mutex);
+       mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
-       mutex_unlock(&glob->device_list_mutex);
+       mutex_unlock(&ttm_global_mutex);
  
        return 0;
  out_no_sys:
+       ttm_bo_global_release();
        return ret;
  }
  EXPORT_SYMBOL(ttm_bo_device_init);
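
The ttm_bo.c hunks above replace the per-driver drm_global_reference plumbing with a single static ttm_bo_glob whose lifetime is reference counted under ttm_global_mutex: the first ttm_bo_device_init() performs the real initialization, and only the last release tears the state down. Below is a minimal user-space sketch of that first-in/last-out pattern, assuming pthreads; all names are illustrative and this is not the TTM API itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

struct bo_global {
	unsigned int use_count;
	int dummy_resource;		/* stands in for dummy_read_page etc. */
};

static struct bo_global bo_glob = { .use_count = 0 };

static int bo_global_init(void)
{
	int ret = 0;

	pthread_mutex_lock(&global_mutex);
	if (++bo_glob.use_count > 1)	/* someone already did the real init */
		goto out;

	bo_glob.dummy_resource = 42;	/* real code allocates pages, kobjects, ... */
	printf("global state initialized\n");
out:
	pthread_mutex_unlock(&global_mutex);
	return ret;
}

static void bo_global_release(void)
{
	pthread_mutex_lock(&global_mutex);
	if (--bo_glob.use_count > 0)	/* other devices still hold a reference */
		goto out;

	bo_glob.dummy_resource = 0;	/* real code frees pages, drops kobjects */
	printf("global state released\n");
out:
	pthread_mutex_unlock(&global_mutex);
}

int main(void)
{
	bo_global_init();	/* first device: does the real init */
	bo_global_init();	/* second device: only bumps the count */
	bo_global_release();	/* second device gone: count drops back to 1 */
	bo_global_release();	/* last device gone: real teardown */
	return 0;
}
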
index e493edb0d3e71299de52b4fceec4f6ac59afe9c2,e086d6c6ddd954e7ed3e1db1d5c004eeedc953b5..efa005a1c1b79d3e7faa36f23a09419c95cf53a8
@@@ -129,7 -129,7 +129,7 @@@ int ttm_eu_reserve_buffers(struct ww_ac
                        if (!entry->shared)
                                continue;
  
 -                      ret = reservation_object_reserve_shared(bo->resv);
 +                      ret = reservation_object_reserve_shared(bo->resv, 1);
                        if (!ret)
                                continue;
                }
                }
  
                if (!ret && entry->shared)
 -                      ret = reservation_object_reserve_shared(bo->resv);
 +                      ret = reservation_object_reserve_shared(bo->resv, 1);
  
                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
@@@ -187,14 -187,12 +187,12 @@@ void ttm_eu_fence_buffer_objects(struc
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
-       struct ttm_bo_driver *driver;
  
        if (list_empty(list))
                return;
  
        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
-       driver = bdev->driver;
        glob = bo->bdev->glob;
  
        spin_lock(&glob->lru_lock);
index 6474e83cbf3d5aaea86d03d3c6a8f9f56c1e0331,1acbf182536f14e95208ad329a659da4888255e1..2a8aaea72af315ad35b095b5a49b04d401b914a1
@@@ -65,7 -65,6 +65,7 @@@ struct virtio_gpu_object 
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
 +      bool created;
  };
  #define gem_to_virtio_gpu_obj(gobj) \
        container_of((gobj), struct virtio_gpu_object, gem_base)
@@@ -143,9 -142,6 +143,6 @@@ struct virtio_gpu_fbdev 
  };
  
  struct virtio_gpu_mman {
-       struct ttm_bo_global_ref        bo_global_ref;
-       struct drm_global_reference     mem_global_ref;
-       bool                            mem_global_referenced;
        struct ttm_bo_device            bdev;
  };
  
@@@ -191,7 -187,8 +188,7 @@@ struct virtio_gpu_device 
        struct kmem_cache *vbufs;
        bool vqs_ready;
  
 -      struct idr      resource_idr;
 -      spinlock_t resource_idr_lock;
 +      struct ida      resource_ida;
  
        wait_queue_head_t resp_wq;
        /* current display info */
  
        struct virtio_gpu_fence_driver fence_drv;
  
 -      struct idr      ctx_id_idr;
 -      spinlock_t ctx_id_idr_lock;
 +      struct ida      ctx_id_ida;
  
        bool has_virgl_3d;
  
@@@ -258,8 -256,11 +255,8 @@@ int virtio_gpu_surface_dirty(struct vir
  /* virtio vg */
  int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
  void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 -void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 -                             uint32_t *resid);
 -void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
  void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 -                                  uint32_t resource_id,
 +                                  struct virtio_gpu_object *bo,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height);
@@@ -281,6 -282,7 +278,6 @@@ void virtio_gpu_cmd_set_scanout(struct 
                                uint32_t x, uint32_t y);
  int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
 -                           uint32_t resource_id,
                             struct virtio_gpu_fence **fence);
  void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj);
@@@ -319,7 -321,6 +316,7 @@@ void virtio_gpu_cmd_transfer_to_host_3d
                                        struct virtio_gpu_fence **fence);
  void
  virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 +                                struct virtio_gpu_object *bo,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence);
  void virtio_gpu_ctrl_ack(struct virtqueue *vq);
index cd63dffa6d40be8cc5826bf0b2e5556c892b1088,8fc088d5ef0dfd7b70b5a1a44d65d339b0147d02..4bfbf25fabff8091cab1518f5fc4302a0e4dfd56
@@@ -50,62 -50,6 +50,6 @@@ virtio_gpu_device *virtio_gpu_get_vgdev
        return vgdev;
  }
  
- static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
- {
-       return ttm_mem_global_init(ref->object);
- }
- static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
- {
-       ttm_mem_global_release(ref->object);
- }
- static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
- {
-       struct drm_global_reference *global_ref;
-       int r;
-       vgdev->mman.mem_global_referenced = false;
-       global_ref = &vgdev->mman.mem_global_ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_MEM;
-       global_ref->size = sizeof(struct ttm_mem_global);
-       global_ref->init = &virtio_gpu_ttm_mem_global_init;
-       global_ref->release = &virtio_gpu_ttm_mem_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM memory accounting "
-                         "subsystem.\n");
-               return r;
-       }
-       vgdev->mman.bo_global_ref.mem_glob =
-               vgdev->mman.mem_global_ref.object;
-       global_ref = &vgdev->mman.bo_global_ref.ref;
-       global_ref->global_type = DRM_GLOBAL_TTM_BO;
-       global_ref->size = sizeof(struct ttm_bo_global);
-       global_ref->init = &ttm_bo_global_init;
-       global_ref->release = &ttm_bo_global_release;
-       r = drm_global_item_ref(global_ref);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&vgdev->mman.mem_global_ref);
-               return r;
-       }
-       vgdev->mman.mem_global_referenced = true;
-       return 0;
- }
- static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
- {
-       if (vgdev->mman.mem_global_referenced) {
-               drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
-               drm_global_item_unref(&vgdev->mman.mem_global_ref);
-               vgdev->mman.mem_global_referenced = false;
-       }
- }
  int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
  {
        struct drm_file *file_priv;
@@@ -347,7 -291,8 +291,7 @@@ static void virtio_gpu_bo_move_notify(s
  
        } else if (new_mem->placement & TTM_PL_FLAG_TT) {
                if (bo->hw_res_handle) {
 -                      virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
 -                                               NULL);
 +                      virtio_gpu_object_attach(vgdev, bo, NULL);
                }
        }
  }
@@@ -382,12 -327,8 +326,8 @@@ int virtio_gpu_ttm_init(struct virtio_g
  {
        int r;
  
-       r = virtio_gpu_ttm_global_init(vgdev);
-       if (r)
-               return r;
        /* No others user of address space so set it to 0 */
        r = ttm_bo_device_init(&vgdev->mman.bdev,
-                              vgdev->mman.bo_global_ref.ref.object,
                               &virtio_gpu_bo_driver,
                               vgdev->ddev->anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
  err_mm_init:
        ttm_bo_device_release(&vgdev->mman.bdev);
  err_dev_init:
-       virtio_gpu_ttm_global_fini(vgdev);
        return r;
  }
  
  void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
  {
        ttm_bo_device_release(&vgdev->mman.bdev);
-       virtio_gpu_ttm_global_fini(vgdev);
        DRM_INFO("virtio_gpu: ttm finalized\n");
  }
diff --combined include/drm/drmP.h
index 514beb2d483aa34ed00a96fd95b2d99f5aeeeb29,2557001d1b218bb18a50f6e1419a9b1666dbdc8a..bdb0d5548f39ede087a64e626f34b215834733d5
@@@ -68,7 -68,6 +68,6 @@@
  #include <drm/drm_agpsupport.h>
  #include <drm/drm_crtc.h>
  #include <drm/drm_fourcc.h>
- #include <drm/drm_global.h>
  #include <drm/drm_hashtab.h>
  #include <drm/drm_mm.h>
  #include <drm/drm_os_linux.h>
@@@ -110,10 -109,4 +109,10 @@@ static inline bool drm_can_sleep(void
        return true;
  }
  
 +#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
 +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
 +#else
 +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
 +#endif
 +
  #endif