#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+ struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
- int r;
+ long r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.num_shared = 1;
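+ /* two shared slots: one to re-add the exclusive fence as shared, one for the page-table clear fence below */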
+ tv.num_shared = 2;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%d)\n", r);
+ "we fail to reserve bo (%ld)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va && --bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
-
- if (amdgpu_vm_ready(vm)) {
- struct dma_fence *fence = NULL;
+ if (!bo_va || --bo_va->ref_count)
+ goto out_unlock;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (unlikely(r)) {
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%d)\n", r);
- }
+ amdgpu_vm_bo_rmv(adev, bo_va);
+ if (!amdgpu_vm_ready(vm))
+ goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
- }
- }
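+ /* re-add the current exclusive fence as a shared fence before clearing */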
+ fence = dma_resv_get_excl(bo->tbo.base.resv);
+ if (fence) {
+ amdgpu_bo_fence(bo, fence, true);
+ fence = NULL;
}
+
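+ /* clear the freed mappings from the page tables and fence the BO with the result */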
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (r || !fence)
+ goto out_unlock;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+ out_unlock:
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
ttm_eu_backoff_reservation(&ticket, &list);
}
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from %p%s", dma_buf,
+ attachment->peer2peer ? " P2P" : "");
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
(1 << 0), /* HMM_PFN_VALID */
(1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
};
static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_NORMAL, &job);
if (r)
return r;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
+ direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
if (r)
return r;
/* for IB padding */
num_dw += 64;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
if (r)
return r;
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_arct[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
- SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
};
/**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
/**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
fw_inst_const_size);
}
- memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
- fw_bss_data_size);
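+ /* the firmware may not carry a BSS/data section; skip the copy when it is empty */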
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output));
+ if (force_disable_dcc)
+ return 0;
+
if (!offset)
return 0;
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
- tiling_flags, dcc, address);
+ tiling_flags, dcc, address,
+ force_disable_dcc);
if (ret)
return ret;
}
const struct drm_plane_state *plane_state,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
- &plane_info->dcc, address);
+ &plane_info->dcc, address,
+ force_disable_dcc);
if (ret)
return ret;
struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
+ bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
if (ret)
if (ret)
return ret;
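+ /* keep DCC force-disabled on Raven while the device is suspending */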
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info,
- &dc_plane_state->address);
+ &dc_plane_state->address,
+ force_disable_dcc);
if (ret)
return ret;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
- if (stream->link->psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_feature_enabled) {
struct dc *core_dc = stream->link->ctx->dc;
if (dc_is_dmcu_initialized(core_dc)) {
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- stream->psr_version = dmcu->dmcu_version.psr_version;
-
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
i2c_del_adapter(&aconnector->i2c->base);
kfree(aconnector->i2c);
}
+ kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ int r;
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
uint64_t tiling_flags;
uint32_t domain;
int r;
+ bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state);
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
- &plane_state->address);
+ &plane_state->address,
+ force_disable_dcc);
}
return 0;
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
out_free:
if (res) {
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
fill_dc_scaling_info(new_plane_state,
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count],
- &bundle->flip_addrs[planes_count].address);
+ &bundle->flip_addrs[planes_count].address,
+ false);
+
+ DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_allow_active)
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
dc_state);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->psr_version &&
- !acrtc_state->stream->link->psr_feature_enabled)
+ acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active) {
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream) {
- if (dm_old_crtc_state->stream->link->psr_allow_active)
+ if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
new_dm_plane_state->dc_state->gamma_correction;
bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
+ bundle->surface_updates[num_plane].gamut_remap_matrix =
+ &new_dm_plane_state->dc_state->gamut_remap_matrix;
bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
bundle->stream_update.output_csc_transform =
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
plane_info,
- &flip_addr->address);
+ &flip_addr->address,
+ false);
if (ret)
goto cleanup;
return;
if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
dpcd_data, sizeof(dpcd_data))) {
- link->psr_feature_enabled = dpcd_data[0] ? true:false;
- DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
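+ /* byte 0 of the PSR caps: zero means PSR is unsupported, any nonzero value is treated as PSR1 */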
+ if (dpcd_data[0] == 0) {
+ link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+ } else {
+ link->psr_settings.psr_version = PSR_VERSION_1;
+ link->psr_settings.psr_feature_enabled = true;
+ }
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
}
}
link = stream->link;
dc = link->ctx->dc;
- psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
if (psr_config.psr_version > 0) {
psr_config.psr_exit_link_training_required = 0x1;
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
- DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
return ret;
}
#include "amdgpu_dm_debugfs.h"
#endif
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif
- /* #define TRACE_DPCD */
-
- #ifdef TRACE_DPCD
- #define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
- static inline char *side_band_msg_type_to_str(uint32_t address)
- {
- static char str[10] = {0};
-
- if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
- strcpy(str, "DOWN_REQ");
- else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
- strcpy(str, "UP_REP");
- else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
- strcpy(str, "DOWN_REP");
- else
- strcpy(str, "UP_REQ");
-
- return str;
- }
-
- static void log_dpcd(uint8_t type,
- uint32_t address,
- uint8_t *data,
- uint32_t size,
- bool res)
- {
- DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
- (type == DP_AUX_NATIVE_READ) ||
- (type == DP_AUX_I2C_READ) ?
- "Read" : "Write",
- address,
- SIDE_BAND_MSG(address) ?
- side_band_msg_type_to_str(address) : "Nop",
- res ? "OK" : "Fail");
-
- if (res) {
- print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
- }
- }
- #endif
-
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
+ if (aconnector->dc_sink) {
+ dc_link_remove_remote_sink(aconnector->dc_link,
+ aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ kfree(aconnector->edid);
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
- kfree(amdgpu_dm_connector);
+ drm_dp_mst_put_port_malloc(aconnector->port);
+ kfree(aconnector);
}
static int
to_amdgpu_dm_connector(connector);
int r;
- amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
- r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
- if (r)
+ r = drm_dp_mst_connector_late_register(connector,
+ amdgpu_dm_connector->port);
+ if (r < 0)
return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
- return r;
+ return 0;
}
static void
*/
amdgpu_dm_connector_funcs_reset(connector);
- DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
drm_dp_mst_get_port_malloc(port);
- DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
return connector;
}
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- if (aconnector->dc_sink) {
- amdgpu_dm_update_freesync_caps(connector, NULL);
- dc_link_remove_remote_sink(aconnector->dc_link,
- aconnector->dc_sink);
- dc_sink_release(aconnector->dc_sink);
- aconnector->dc_sink = NULL;
- aconnector->dc_link->cur_link_settings.lane_count = 0;
- }
-
- drm_connector_unregister(connector);
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
- .destroy_connector = dm_dp_destroy_mst_connector,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector)
+ struct amdgpu_dm_connector *aconnector,
+ int link_index)
{
- aconnector->dm_dp_aux.aux.name = "dmdc";
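+ /* allocated with kasprintf(); the connector destroy path kfree()s it */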
+ aconnector->dm_dp_aux.aux.name =
+ kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
+ link_index);
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
- /* Set Default link settings */
- struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
- /* Higher link settings based on feature supported */
- if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH2;
-
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ struct dc_link_settings max_link_cap = {0};
- if (link->link_enc->funcs->get_max_link_cap)
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ /* get max link encoder capability */
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_feature_enabled)
+ if (!link->psr_settings.psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
link,
- DP_TEST_PHY_PATTERN,
+ DP_PHY_TEST_PATTERN,
&dpcd_test_pattern.raw,
sizeof(dpcd_test_pattern));
core_link_read_dpcd(
sizeof(hpd_irq_dpcd_data),
"Status: ");
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_reallocate_mst_payload(link);
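+ /* unblank the streams again using the previous link settings */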
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
void dpcd_set_source_specific_data(struct dc_link *link)
{
const uint32_t post_oui_delay = 30; // 30ms
+ uint8_t dspc = 0;
+ enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+ ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+ sizeof(dspc));
+
+ if (ret != DC_OK) {
+ DC_LOG_ERROR("Error in DP aux read transaction,"
+ " not writing source specific data\n");
+ return;
+ }
+
+ /* Return if OUI unsupported */
+ if (!(dspc & DP_OUI_SUPPORT))
+ return;
if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature;
const struct smc_firmware_header_v1_0 *hdr;
uint32_t addr_start = MP1_SRAM;
uint32_t i;
+ uint32_t smc_fw_size;
uint32_t mp1_fw_flags;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
- for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
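+ /* copy all but the first and last dword, bounded by the real firmware size */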
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
WREG32_PCIE(addr_start, src[i]);
addr_start += 4;
}
switch (smu->adev->asic_type) {
case CHIP_VEGA20:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
break;
case CHIP_ARCTURUS:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
case CHIP_NAVI10:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
case CHIP_NAVI12:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
case CHIP_NAVI14:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
default:
pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
* Considering above, we just leave user a warning message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
{
struct smu_table_context *table_context = &smu->smu_table;
- if (!smu->pm_enabled)
- return 0;
if (!table_context)
return -EINVAL;
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
int ret = 0;
int clk_id;
- if (!smu->pm_enabled)
- return ret;
-
if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
(smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
- if (!smu->pm_enabled)
- return ret;
-
memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
ret = smu_get_thermal_temperature_range(smu, &range);
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
if (ret)
goto out;
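+ /* with RAS active, ask the SMU to prepare MP1 for the coming unload */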
+ if (ras && ras->supported) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ if (ret)
+ goto out;
+ }
+
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);