drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_info.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o drm_prime.o \
+ drm_trace_points.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o drm_gem_framebuffer_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o \
+ drm_atomic_state_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
if (r)
return r;
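The reservation_object_reserve_shared() change threads a num_fences count through the API, so a caller that knows how many shared fences it will add can reserve all the slots with a single call. A minimal sketch of the calling pattern under that assumption (the helper name is illustrative, and the reservation object is expected to be locked):

    /* Reserve 'count' shared-fence slots in one call, then add fences. */
    static int example_add_shared_fences(struct reservation_object *resv,
                                         struct dma_fence **fences,
                                         unsigned int count)
    {
        unsigned int i;
        int r;

        r = reservation_object_reserve_shared(resv, count);
        if (r)
            return r;

        for (i = 0; i < count; ++i)
            reservation_object_add_shared_fence(resv, fences[i]);

        return 0;
    }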
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
if (r)
return r;
return 0;
error_abort:
- dma_fence_put(&job->base.s_fence->finished);
- job->base.s_fence = NULL;
+ drm_sched_job_cleanup(&job->base);
amdgpu_mn_unlock(p->mn);
error_unlock:
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser !\n");
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
- static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* Two registers' distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
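The rewrite works because the SOC15 VM context registers are laid out at a fixed stride: each context owns an adjacent LO32/HI32 pair, so context N's base-address registers sit N strides past context 0's. A standalone sketch of the arithmetic, using hypothetical register indices rather than the real SOC15 offsets:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical indices; the real values come from the SOC15 headers. */
    #define VM_CONTEXT0_PT_BASE_LO32 0x100
    #define VM_CONTEXT1_PT_BASE_LO32 0x102

    int main(void)
    {
        /* Stride between contexts: one LO32 plus one HI32 register. */
        int stride = VM_CONTEXT1_PT_BASE_LO32 - VM_CONTEXT0_PT_BASE_LO32;
        uint32_t vmid;

        for (vmid = 0; vmid < 4; vmid++)
            printf("vmid %u: lo32 @ 0x%x, hi32 @ 0x%x\n", vmid,
                   VM_CONTEXT0_PT_BASE_LO32 + stride * vmid,
                   VM_CONTEXT0_PT_BASE_LO32 + stride * vmid + 1);
        return 0;
    }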
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
return base;
}
- static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+ void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* Two registers' distance between mmVM_CONTEXT0_* and mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
/*
* to get rid of the VM fault and hardware hang.
*/
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.vram_end >> 18) + 0x1,
+ max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+ /**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
}
- /*
- * Init display KMS
- *
- * Returns 0 on success
- */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+ /**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function name implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
static int dm_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return 0;
}
+ /**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
static int dm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ret;
}
+ /**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
static const struct amd_ip_funcs amdgpu_dm_funcs = {
.name = "dm",
.early_init = dm_early_init,
kfree(dm_state);
}
+ /**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+ /* backlight_pwm_u16_16 is an unsigned 32-bit value with a 16-bit integer
+ * part and a 16-bit fractional part, where 1.0 is the maximum backlight
+ * value. bd->props.brightness is in 8-bit format and is converted by
+ * scaling: the lower byte is copied into the upper byte of the 16-bit
+ * value.
+ */
+ uint32_t brightness = bd->props.brightness * 0x101;
+
+ /*
+ * The PWM interprets 0 as 100% rather than 0% because of a HW
+ * limitation at level 0, so limit the minimum brightness level
+ * to 1.
+ */
+ if (bd->props.brightness < 1)
+ brightness = 0x101;
+
if (dc_link_set_backlight_level(dm->backlight_link,
- bd->props.brightness, 0, 0))
+ brightness, 0, 0))
return 0;
else
return 1;
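Multiplying by 0x101 is the usual trick for widening an 8-bit value to 16 bits with exact endpoints: it replicates the byte into both halves, so 0x00 maps to 0x0000 and 0xFF maps to 0xFFFF, the maximum value the u16.16 backlight parameter accepts. A quick standalone check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
        /* Byte replication: v * 0x101 == (v << 8) | v for 8-bit v. */
        assert(0x00 * 0x101 == 0x0000);  /* fully off */
        assert(0x80 * 0x101 == 0x8080);  /* ~50.2% */
        assert(0xFF * 0x101 == 0xFFFF);  /* maximum brightness */
        return 0;
    }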
struct amdgpu_mode_info *mode_info,
int plane_id)
{
- struct amdgpu_plane *plane;
+ struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
mode_info->planes[plane_id] = plane;
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->base.type = mode_info->plane_type[plane_id];
+ plane->type = mode_info->plane_type[plane_id];
/*
* HACK: IGT tests expect that each plane can only have
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
- if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
DRM_ERROR("KMS: Failed to initialize crtc\n");
goto fail;
}
*/
.get_modes = get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = drm_atomic_helper_best_encoder
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
};
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs)
{
int res = -EPERM;
- switch (aplane->base.type) {
+ switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
rgb_formats,
ARRAY_SIZE(rgb_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_OVERLAY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
yuv_formats,
ARRAY_SIZE(yuv_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_CURSOR:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
cursor_formats,
ARRAY_SIZE(cursor_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
}
- drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
- if (aplane->base.funcs->reset)
- aplane->base.funcs->reset(&aplane->base);
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
return res;
uint32_t crtc_index)
{
struct amdgpu_crtc *acrtc = NULL;
- struct amdgpu_plane *cursor_plane;
+ struct drm_plane *cursor_plane;
int res = -ENOMEM;
if (!cursor_plane)
goto fail;
- cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
dm->ddev,
&acrtc->base,
plane,
- &cursor_plane->base,
+ cursor_plane,
&amdgpu_dm_crtc_funcs, NULL);
if (res)
}
}
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (encoder == NULL)
return;
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (!edid || !drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
/* TODO: handle EINTR, re-enable IRQ */
}
+ /**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered out anything non-kosher.
+ */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct dc_stream_update stream_update;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ if (!updates || !surface) {
+ DRM_ERROR("Plane or surface update failed to allocate");
+ /* Set type to FULL to avoid crashing in DC*/
+ update_type = UPDATE_TYPE_FULL;
+ goto ret;
+ }
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
return update_type;
}
+ /**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, which removes/adds/updates streams on
+ * one CRTC while flipping on another CRTC, acquiring the global lock
+ * guarantees that any such full-update commit will wait for completion of any
+ * outstanding flip using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit().
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the DC sink, which is tied to the DRM connector state. Cleaning this up
+ * should be possible but non-trivial: a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
lock_and_validation_needed = true;
}
- /*
- * For full updates case when
- * removing/adding/updating streams on one CRTC while flipping
- * on another CRTC,
- * acquiring global lock will guarantee that any such full
- * update commit
- * will wait for completion of any outstanding flip using DRMs
- * synchronization events.
- */
update_type = dm_determine_update_type_for_commit(dc, state);
if (overall_update_type < update_type)
u16 yres_virtual;
u32 stride;
u32 bpp;
+ struct edid *edid;
/* drm */
struct drm_device *dev;
/* ttm */
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
} ttm;
const struct drm_format_info *format);
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, u64 addr);
+int bochs_hw_load_edid(struct bochs_device *bochs);
/* bochs_mm.c */
int bochs_mm_init(struct bochs_device *bochs);
return container_of(bd, struct bochs_device, ttm.bdev);
}
- static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
- {
- return ttm_mem_global_init(ref->object);
- }
-
- static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
- {
- ttm_mem_global_release(ref->object);
- }
-
- static int bochs_ttm_global_init(struct bochs_device *bochs)
- {
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &bochs->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &bochs_ttm_mem_global_init;
- global_ref->release = &bochs_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- bochs->ttm.bo_global_ref.mem_glob =
- bochs->ttm.mem_global_ref.object;
- global_ref = &bochs->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- return r;
- }
-
- return 0;
- }
-
- static void bochs_ttm_global_release(struct bochs_device *bochs)
- {
- if (bochs->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- bochs->ttm.mem_global_ref.release = NULL;
- }
-
-
static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct bochs_bo *bo;
struct ttm_bo_device *bdev = &bochs->ttm.bdev;
int ret;
- ret = bochs_ttm_global_init(bochs);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&bochs->ttm.bdev,
- bochs->ttm.bo_global_ref.ref.object,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
return;
ttm_bo_device_release(&bochs->ttm.bdev);
- bochs_ttm_global_release(bochs);
bochs->ttm.initialized = false;
}
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
bo = gem_to_bochs_bo(obj);
*offset = bochs_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
* The initial ref-count of the object is 1. Use drm_dev_get() and
* drm_dev_put() to take and drop further ref-counts.
*
- * Note that for purely virtual devices @parent can be NULL.
- *
* Drivers that do not want to allocate their own device struct
* embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
* that do embed &struct drm_device it must be placed first in the overall
return -ENODEV;
}
+ BUG_ON(!parent);
+
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
}
}
- /* Use the parent device name as DRM device unique identifier, but fall
- * back to the driver name for virtual devices like vgem. */
- ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+ ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
goto err_setunique;
drm_sysfs_destroy();
idr_destroy(&drm_minors_idr);
drm_connector_ida_destroy();
- drm_global_release();
}
static int __init drm_core_init(void)
{
int ret;
- drm_global_init();
drm_connector_ida_init();
idr_init(&drm_minors_idr);
* If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
- if (fence_completed(gpu, submit->out_fence->seqno))
+ if (dma_fence_is_signaled(submit->out_fence))
return;
/*
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- schedule_delayed_work(&sched_job->sched->work_tdr,
- sched_job->sched->timeout);
return;
}
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ drm_sched_job_cleanup(sched_job);
+
etnaviv_submit_put(submit);
}
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
if (submit->out_fence_id < 0) {
+ drm_sched_job_cleanup(&submit->sched_job);
ret = -ENOMEM;
goto out_unlock;
}
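Both etnaviv hunks follow the same rule: once drm_sched_job_init() has succeeded, every exit path, whether the job is eventually freed or the submit bails out early, must release the scheduler fences via drm_sched_job_cleanup() instead of dropping them by hand (which is exactly what the amdgpu error path above stopped doing). A hedged sketch of the pairing, with the hardware-specific step stubbed out:

    /* submit_to_hw_queue() is a hypothetical driver-specific step. */
    static int example_submit(struct drm_sched_job *job,
                              struct drm_sched_entity *entity,
                              void *owner)
    {
        int r;

        r = drm_sched_job_init(job, entity, owner);
        if (r)
            return r;   /* nothing to clean up before init succeeds */

        r = submit_to_hw_queue(job);
        if (r) {
            /* Release the scheduler fences allocated by init. */
            drm_sched_job_cleanup(job);
            return r;
        }

        drm_sched_entity_push_job(job, entity);
        return 0;
    }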
* Alon Levy
*/
-
#ifndef QXL_DRV_H
#define QXL_DRV_H
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
+ unsigned int pin_count;
void *kptr;
int type;
/* Constant after initialization */
struct drm_gem_object gem_base;
- bool is_primary; /* is this now a primary surface */
- bool is_dumb;
+ unsigned int is_primary:1; /* is this now a primary surface */
+ unsigned int is_dumb:1;
struct qxl_bo *shadow;
- bool hw_surf_alloc;
+ unsigned int hw_surf_alloc:1;
struct qxl_surface surf;
uint32_t surface_id;
struct qxl_release *surf_create;
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- unsigned int mem_global_referenced:1;
struct ttm_bo_device bdev;
};
-
struct qxl_memslot {
uint8_t generation;
uint64_t start_phys_addr;
*/
struct qxl_debugfs {
struct drm_info_list *files;
- unsigned num_files;
+ unsigned int num_files;
};
int qxl_debugfs_add_files(struct qxl_device *rdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
struct qxl_device;
struct qxl_ram_header *ram_header;
- bool primary_created;
+ unsigned int primary_created:1;
struct qxl_memslot *mem_slots;
uint8_t n_mem_slots;
atomic_t irq_received_display;
atomic_t irq_received_cursor;
atomic_t irq_received_io_cmd;
- unsigned irq_received_error;
+ unsigned int irq_received_error;
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
wait_queue_head_t io_cmd_event;
/* debugfs */
struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
+ unsigned int debugfs_count;
struct mutex update_area_mutex;
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
- unsigned offset,
+ unsigned int offset,
struct qxl_bo *bo);
void qxl_io_destroy_primary(struct qxl_device *qdev);
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc);
+ unsigned int num_clips, int inc);
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
return qdev;
}
- static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
- {
- return ttm_mem_global_init(ref->object);
- }
-
- static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
- {
- ttm_mem_global_release(ref->object);
- }
-
- static int qxl_ttm_global_init(struct qxl_device *qdev)
- {
- struct drm_global_reference *global_ref;
- int r;
-
- qdev->mman.mem_global_referenced = false;
- global_ref = &qdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &qxl_ttm_mem_global_init;
- global_ref->release = &qxl_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- qdev->mman.bo_global_ref.mem_glob =
- qdev->mman.mem_global_ref.object;
- global_ref = &qdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- return r;
- }
-
- qdev->mman.mem_global_referenced = true;
- return 0;
- }
-
- static void qxl_ttm_global_fini(struct qxl_device *qdev)
- {
- if (qdev->mman.mem_global_referenced) {
- drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- qdev->mman.mem_global_referenced = false;
- }
- }
-
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
}
return 0;
if (ret)
return ret;
-
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
- r = qxl_ttm_global_init(qdev);
- if (r)
- return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
- qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
return r;
}
DRM_INFO("qxl: %uM of VRAM memory size\n",
- (unsigned)qdev->vram_size / (1024 * 1024));
+ (unsigned int)qdev->vram_size / (1024 * 1024));
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
- ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+ ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
- (unsigned)qdev->surfaceram_size / (1024 * 1024));
+ (unsigned int)qdev->surfaceram_size / (1024 * 1024));
return 0;
}
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
- qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
}
-
#define QXL_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
- unsigned i;
+ unsigned int i;
for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
if (i == 0)
static void ttm_bo_global_kobj_release(struct kobject *kobj);
+ /**
+ * ttm_global_mutex - protecting the global BO state
+ */
+ DEFINE_MUTEX(ttm_global_mutex);
+ struct ttm_bo_global ttm_bo_glob = {
+ .use_count = 0
+ };
+
static struct attribute ttm_bo_count = {
.name = "bo_count",
.mode = S_IRUGO
if (fence) {
reservation_object_add_shared_fence(bo->resv, fence);
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
bool has_erestartsys = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
container_of(kobj, struct ttm_bo_global, kobj);
__free_page(glob->dummy_read_page);
- kfree(glob);
}
- void ttm_bo_global_release(struct drm_global_reference *ref)
+ static void ttm_bo_global_release(void)
{
- struct ttm_bo_global *glob = ref->object;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+
+ mutex_lock(&ttm_global_mutex);
+ if (--glob->use_count > 0)
+ goto out;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ ttm_mem_global_release(&ttm_mem_glob);
+ out:
+ mutex_unlock(&ttm_global_mutex);
}
- EXPORT_SYMBOL(ttm_bo_global_release);
- int ttm_bo_global_init(struct drm_global_reference *ref)
+ static int ttm_bo_global_init(void)
{
- struct ttm_bo_global_ref *bo_ref =
- container_of(ref, struct ttm_bo_global_ref, ref);
- struct ttm_bo_global *glob = ref->object;
- int ret;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret = 0;
unsigned i;
- mutex_init(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
+ if (++glob->use_count > 1)
+ goto out;
+
+ ret = ttm_mem_global_init(&ttm_mem_glob);
+ if (ret)
+ goto out;
+
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob = &ttm_mem_glob;
glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
- goto out_no_drp;
+ goto out;
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
- return ret;
- out_no_drp:
- kfree(glob);
+ out:
+ mutex_unlock(&ttm_global_mutex);
return ret;
}
- EXPORT_SYMBOL(ttm_bo_global_init);
-
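What replaces the drm_global machinery is a single use-counted singleton guarded by ttm_global_mutex: the first ttm_bo_device_init() brings up the memory and BO globals, the last ttm_bo_device_release() tears them down. Reduced to its essentials, the pattern looks like this (a generic sketch, not the TTM code itself; the setup/teardown helpers are hypothetical):

    static DEFINE_MUTEX(example_global_mutex);
    static unsigned int example_use_count;

    static int example_global_init(void)
    {
        int ret = 0;

        mutex_lock(&example_global_mutex);
        if (++example_use_count > 1)
            goto out;   /* an earlier device already initialized it */

        ret = do_global_setup();    /* hypothetical one-time setup */
        if (ret)
            --example_use_count;
    out:
        mutex_unlock(&example_global_mutex);
        return ret;
    }

    static void example_global_release(void)
    {
        mutex_lock(&example_global_mutex);
        if (--example_use_count == 0)
            do_global_teardown();   /* hypothetical one-time teardown */
        mutex_unlock(&example_global_mutex);
    }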
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
}
}
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
cancel_delayed_work_sync(&bdev->wq);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
+ if (!ret)
+ ttm_bo_global_release();
+
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
- int ret = -EINVAL;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret;
+
+ ret = ttm_bo_global_init();
+ if (ret)
+ return ret;
bdev->driver = driver;
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
return 0;
out_no_sys:
+ ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
if (!entry->shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (!ret)
continue;
}
}
if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
- driver = bdev->driver;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
};
struct virtio_gpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
};
struct kmem_cache *vbufs;
bool vqs_ready;
- struct idr resource_idr;
- spinlock_t resource_idr_lock;
+ struct ida resource_ida;
wait_queue_head_t resp_wq;
/* current display info */
struct virtio_gpu_fence_driver fence_drv;
- struct idr ctx_id_idr;
- spinlock_t ctx_id_idr_lock;
+ struct ida ctx_id_ida;
bool has_virgl_3d;
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid);
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
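Switching from an IDR to an IDA reflects what these IDs actually are: the driver only needs unique integers, never an ID-to-pointer lookup, and an IDA is cheaper and does its own locking, which is why the two spinlocks disappear from the struct as well. A sketch of what such helpers reduce to on top of an IDA (bodies are illustrative, not copied from the patch):

    #include <linux/idr.h>

    /* Start at 1: resource/context ID 0 conventionally means "none". */
    static int example_id_get(struct ida *ida, uint32_t *id)
    {
        int handle = ida_simple_get(ida, 1, 0, GFP_KERNEL);

        if (handle < 0)
            return handle;
        *id = handle;
        return 0;
    }

    static void example_id_put(struct ida *ida, uint32_t id)
    {
        ida_simple_remove(ida, id);
    }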
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
+ struct virtio_gpu_object *bo,
uint32_t format,
uint32_t width,
uint32_t height);
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
struct virtio_gpu_fence **fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj);
struct virtio_gpu_fence **fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
struct virtio_gpu_resource_create_3d *rc_3d,
struct virtio_gpu_fence **fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
return vgdev;
}
- static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
- {
- return ttm_mem_global_init(ref->object);
- }
-
- static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
- {
- ttm_mem_global_release(ref->object);
- }
-
- static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
- {
- struct drm_global_reference *global_ref;
- int r;
-
- vgdev->mman.mem_global_referenced = false;
- global_ref = &vgdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &virtio_gpu_ttm_mem_global_init;
- global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- vgdev->mman.bo_global_ref.mem_glob =
- vgdev->mman.mem_global_ref.object;
- global_ref = &vgdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- return r;
- }
-
- vgdev->mman.mem_global_referenced = true;
- return 0;
- }
-
- static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
- {
- if (vgdev->mman.mem_global_referenced) {
- drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- vgdev->mman.mem_global_referenced = false;
- }
- }
-
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
- NULL);
+ virtio_gpu_object_attach(vgdev, bo, NULL);
}
}
}
{
int r;
- r = virtio_gpu_ttm_global_init(vgdev);
- if (r)
- return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&vgdev->mman.bdev,
- vgdev->mman.bo_global_ref.ref.object,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
err_mm_init:
ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
- virtio_gpu_ttm_global_fini(vgdev);
return r;
}
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
ttm_bo_device_release(&vgdev->mman.bdev);
- virtio_gpu_ttm_global_fini(vgdev);
DRM_INFO("virtio_gpu: ttm finalized\n");
}
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
- #include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
return true;
}
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
#endif
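This lets internal helpers be exported only when the selftest module is built; in normal builds the macro expands to nothing and the symbol is not exported to modules. A hypothetical use (the helper name is made up for illustration):

    int drm_internal_helper_under_test(int arg)
    {
        return arg * 2; /* stand-in body */
    }
    EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_helper_under_test);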