}
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
- adev_to_drm(adev), &gpu_resources);
+ &gpu_resources);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
goto err;
}
- ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
if (ret)
goto err;
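For context, the reworked allocator binds the VM, scheduler entity, and fence owner at allocation time rather than at submission. A minimal sketch of the new signature as inferred from the six-argument call above (parameter names are assumptions, not quoted from the header):

    /* vm, entity and owner may all be NULL for jobs that will be
     * submitted directly to a ring rather than through the scheduler.
     */
    int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                         struct drm_sched_entity *entity, void *owner,
                         unsigned int num_ibs, struct amdgpu_job **job);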
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
+ /* Temporary workaround to fix issues observed in some
+ * compute applications when GFXOFF is enabled on GFX11.
+ */
+ if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) {
+ pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
+ amdgpu_gfx_off_ctrl(adev, idle);
+ }
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
!idle);
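The GFX11 check above keys off the packed IP version. In this series IP_VERSION(maj, min, rev) encodes the three fields into one word (see the IP_VERSION(11, 0, 3) comparison later in this set), so a major-version extractor reduces to a right shift. A sketch, with the exact macro definitions assumed rather than quoted:

    /* Assumed encoding used throughout this series. */
    #define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

    /* Under that encoding IP_VERSION_MAJ(ver) behaves like ((ver) >> 16),
     * so every GFX11 part (11.0.0, 11.0.1, ...) compares equal to 11
     * regardless of minor and revision. */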
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
- struct ras_err_data err_data = {0, 0, 0, NULL};
-
- amdgpu_umc_poison_handler(adev, &err_data, reset);
+ amdgpu_umc_poison_handler(adev, reset);
}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
#include <linux/pci-p2pdma.h>
#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
* @pdev: pci dev pointer
* @state: vga_switcheroo state
*
- * Callback for the switcheroo driver. Suspends or resumes the
+ * Callback for the switcheroo driver. Suspends or resumes
* the asics before or after they are powered up using ACPI methods.
*/
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_CSA_SIZE);
return r;
}
adev->ip_blocks[i].status.hw = true;
+
+ if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* disable gfxoff for IP resume; it will be re-enabled in
+ * amdgpu_device_resume() once IP resume completes.
+ */
+ amdgpu_gfx_off_ctrl(adev, false);
+ DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
+ }
+
}
return 0;
* at suspend time.
*
*/
-static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
+static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
+ int ret;
+
/* No need to evict vram on APUs for suspend to ram or s2idle */
if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
- return;
+ return 0;
- if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
+ ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
+ if (ret)
DRM_WARN("evicting device resources failed\n");
-
+ return ret;
}
/*
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- amdgpu_device_evict_resources(adev);
+ r = amdgpu_device_evict_resources(adev);
+ if (r)
+ return r;
amdgpu_fence_driver_hw_fini(adev);
}
/* Make sure IB tests flushed */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_irq_gpu_reset_resume_helper(adev);
flush_delayed_work(&adev->delayed_init_work);
+ if (adev->in_s0ix) {
+ /* re-enable gfxoff after IP resume; it was disabled in
+ * amdgpu_device_ip_resume_phase2() for the duration of IP resume.
+ */
+ amdgpu_gfx_off_ctrl(adev, true);
+ DRM_DEBUG("re-enabling gfxoff for mission mode\n");
+ }
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
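The re-enable above is deliberately paired with the disable added in amdgpu_device_ip_resume_phase2(): amdgpu_gfx_off_ctrl() maintains a disable refcount (gfx_off_req_count), so each false must eventually be balanced by a true. A sketch of the contract as relied on here:

    /* amdgpu_gfx_off_ctrl(adev, false) bumps gfx_off_req_count and keeps
     * GFXOFF disallowed; amdgpu_gfx_off_ctrl(adev, true) drops the count
     * and re-arms GFXOFF once it reaches zero. Calls must therefore be
     * strictly balanced across suspend/resume.
     */
    amdgpu_gfx_off_ctrl(adev, false); /* amdgpu_device_ip_resume_phase2() */
    /* ... remaining IP blocks resume with GFXOFF held off ... */
    amdgpu_gfx_off_ctrl(adev, true);  /* amdgpu_device_resume(), above */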
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
- if (adev->enable_mes)
+ if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
amdgpu_mes_self_test(tmp_adev);
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
#include <drm/amdgpu_drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
+ #include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
pm_runtime_forbid(dev->dev);
}
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ !amdgpu_sriov_vf(adev)) {
bool need_to_reset_gpu = false;
if (adev->gmc.xgmi.num_physical_nodes > 1) {
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+ #include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
fw_info->feature = adev->psp.cap_feature_version;
break;
case AMDGPU_INFO_FW_MES_KIQ:
- fw_info->ver = adev->mes.ucode_fw_version[0];
- fw_info->feature = 0;
+ fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
+ fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
+ >> AMDGPU_MES_FEAT_VERSION_SHIFT;
break;
case AMDGPU_INFO_FW_MES:
- fw_info->ver = adev->mes.ucode_fw_version[1];
+ fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
+ >> AMDGPU_MES_FEAT_VERSION_SHIFT;
+ break;
+ case AMDGPU_INFO_FW_IMU:
+ fw_info->ver = adev->gfx.imu_fw_version;
fw_info->feature = 0;
break;
default:
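Userspace reaches these entries through the AMDGPU_INFO firmware query. A minimal sketch using libdrm's wrapper (hypothetical usage; note AMDGPU_INFO_FW_IMU only exists once this series is applied):

    #include <stdio.h>
    #include <stdint.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    /* dev is an already-initialized amdgpu_device_handle. */
    static void print_mes_fw(amdgpu_device_handle dev)
    {
        uint32_t ver, feat;

        /* MES version/feature now come from the masked sched_version
         * fields instead of ucode_fw_version[]. */
        if (!amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_MES,
                                           0 /* instance */, 0 /* index */,
                                           &ver, &feat))
            printf("MES fw 0x%08x, feature %u\n", ver, feat);
    }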
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
+ if (amdgpu_mcbp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
goto error_vm;
}
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
amdgpu_vce_free_handles(adev, file_priv);
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (amdgpu_mcbp) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
amdgpu_vm_bo_del(adev, fpriv->csa_va);
fw_info.feature, fw_info.ver);
}
+ /* IMU */
+ query_fw.fw_type = AMDGPU_INFO_FW_IMU;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* PSP SOS */
query_fw.fw_type = AMDGPU_INFO_FW_SOS;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
struct amdgpu_device *adev = ring->adev;
unsigned offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
- struct dma_fence *fence;
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
}
}
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
- error_free:
- amdgpu_job_free(job);
- return r;
+ dma_fence_put(amdgpu_job_submit(job));
+ return 0;
}
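This hunk shows the conversion that repeats throughout this series: amdgpu_job_submit() now takes only the job (the entity and owner were supplied at allocation time), returns a reference to the job's finished fence, and can no longer fail, which is why the error_free unwinding disappears. The resulting pattern:

    struct amdgpu_job *job;
    struct dma_fence *fence;
    int r;

    r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                 num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
    if (r)
        return r;

    /* ... build job->ibs[0] ... */

    fence = amdgpu_job_submit(job); /* infallible; returns finished fence */
    dma_fence_put(fence);           /* drop our reference when done */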
/**
}
static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
- unsigned long offset, void *buf, int len, int write)
+ unsigned long offset, void *buf,
+ int len, int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
memcpy(adev->mman.sdma_access_ptr, buf, len);
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
goto out;
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
- src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
if (write)
swap(src_addr, dst_addr);
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+ PAGE_SIZE, false);
amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r) {
- amdgpu_job_free(job);
- goto out;
- }
+ fence = amdgpu_job_submit(job);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
AMDGPU_IB_POOL_DELAYED;
int r;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, pool, job);
if (r)
return r;
adev->gart.bo);
(*job)->vm_needs_flush = true;
}
- if (resv) {
- r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
- AMDGPU_SYNC_ALWAYS,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- amdgpu_job_free(*job);
- return r;
- }
- }
- return 0;
+ if (!resv)
+ return 0;
+
+ return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+ DMA_RESV_USAGE_BOOKKEEP);
}
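The open-coded amdgpu_sync_resv() path is replaced by the common scheduler helper, which walks the reservation object and records each fence as a scheduler dependency; DMA_RESV_USAGE_BOOKKEEP waits on every fence tracked in the resv, matching the old AMDGPU_SYNC_ALWAYS behaviour. Roughly:

    /* Every fence attached to @resv, up to and including BOOKKEEP usage,
     * becomes a dependency the scheduler waits on before running the job.
     * Unlike submission, adding dependencies can still fail (-ENOMEM).
     */
    r = drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
                                            DMA_RESV_USAGE_BOOKKEEP);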
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
if (direct_submit)
r = amdgpu_job_submit_direct(job, ring, fence);
else
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ *fence = amdgpu_job_submit(job);
if (r)
goto error_free;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
-
+ *fence = amdgpu_job_submit(job);
return 0;
-
- error_free:
- amdgpu_job_free(job);
- return r;
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (p->mapping != adev->mman.bdev.dev_mapping)
return -EPERM;
- ptr = kmap(p);
+ ptr = kmap_local_page(p);
r = copy_to_user(buf, ptr + off, bytes);
- kunmap(p);
+ kunmap_local(ptr);
if (r)
return -EFAULT;
if (p->mapping != adev->mman.bdev.dev_mapping)
return -EPERM;
- ptr = kmap(p);
+ ptr = kmap_local_page(p);
r = copy_from_user(ptr + off, buf, bytes);
- kunmap(p);
+ kunmap_local(ptr);
if (r)
return -EFAULT;
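Both hunks above switch from kmap()/kunmap() to the local-mapping API, which avoids the global highmem lock and cross-CPU TLB flushes and is the preferred interface for short, CPU-local mappings. Two constraints worth noting: the pointer is valid only in the current task, and nested mappings must be released in reverse order:

    void *a = kmap_local_page(page_a);
    void *b = kmap_local_page(page_b);
    /* ... copy between a and b ... */
    kunmap_local(b); /* innermost mapping first */
    kunmap_local(a);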
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
cpu_addr = &job->ibs[0].ptr[num_dw];
amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
+ fence = amdgpu_job_submit(job);
dma_fence_put(fence);
return r;
-
- error_free:
- amdgpu_job_free(job);
- return r;
}
/**
for (addr = start; addr < end;) {
unsigned long next;
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start)
+ vma = vma_lookup(mm, addr);
+ if (!vma)
break;
next = min(vma->vm_end, end);
for (addr = start; addr < end;) {
unsigned long next;
- vma = find_vma(mm, addr);
- if (!vma || addr < vma->vm_start) {
+ vma = vma_lookup(mm, addr);
+ if (!vma) {
pr_debug("failed to find vma for prange %p\n", prange);
r = -EFAULT;
break;
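vma_lookup() returns a VMA only if it actually contains the address, whereas find_vma() returns the first VMA ending above it, which is why the old code needed the extra addr < vma->vm_start test. Side by side:

    struct vm_area_struct *vma;

    /* find_vma(): first vma with vm_end > addr; may begin above addr. */
    vma = find_vma(mm, addr);
    if (!vma || addr < vma->vm_start) /* addr falls in a gap */
        break;

    /* vma_lookup(): NULL unless vm_start <= addr < vm_end. */
    vma = vma_lookup(mm, addr);
    if (!vma)                         /* addr falls in a gap */
        break;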
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:
+ pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
kfd_unref_process(p);
out_mmput:
mmput(mm);
-
- pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
-
return r ? VM_FAULT_SIGBUS : 0;
}
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
- #include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
- spin_lock_init(&adev->dm.vblank_lock);
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+ /* TODO: Remove after DP2 receivers get proper support for the Cable ID feature */
+ adev->dm.dc->debug.ignore_cable_id = true;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.get_format_info = amd_get_format_info,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
{
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
- uint32_t max_dsc_target_bpp_limit_override = 0;
struct dc *dc = sink->ctx->dc;
uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
uint32_t dsc_max_supported_bw_in_kbps;
+ uint32_t max_dsc_target_bpp_limit_override =
+ drm_connector->display_info.max_dsc_bpp;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
- if (stream->link && stream->link->local_sink)
- max_dsc_target_bpp_limit_override =
- stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
#endif
if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
}
- mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
+ if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
create_eml_sink(aconnector);
}
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+ struct dc_plane_state *dc_plane_state = NULL;
+ struct dc_state *dc_state = NULL;
+
+ if (!stream)
+ goto cleanup;
+
+ dc_plane_state = dc_create_plane_state(dc);
+ if (!dc_plane_state)
+ goto cleanup;
+
+ dc_state = dc_create_state(dc);
+ if (!dc_state)
+ goto cleanup;
+
+ /* populate the plane state from the stream */
+ dc_plane_state->src_rect.height = stream->src.height;
+ dc_plane_state->src_rect.width = stream->src.width;
+ dc_plane_state->dst_rect.height = stream->src.height;
+ dc_plane_state->dst_rect.width = stream->src.width;
+ dc_plane_state->clip_rect.height = stream->src.height;
+ dc_plane_state->clip_rect.width = stream->src.width;
+ dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+ dc_plane_state->plane_size.surface_size.height = stream->src.height;
+ dc_plane_state->plane_size.surface_size.width = stream->src.width;
+ dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+ dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+ dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+ dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ dc_plane_state->rotation = ROTATION_ANGLE_0;
+ dc_plane_state->is_tiling_rotated = false;
+ dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+ dc_result = dc_validate_stream(dc, stream);
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_plane(dc, dc_plane_state);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+
+ if (dc_result == DC_OK && !dc_add_plane_to_context(
+ dc,
+ stream,
+ dc_plane_state,
+ dc_state))
+ dc_result = DC_FAIL_ATTACH_SURFACES;
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+ if (dc_state)
+ dc_release_state(dc_state);
+
+ if (dc_plane_state)
+ dc_plane_state_release(dc_plane_state);
+
+ return dc_result;
+}
+
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
if (dc_result != DC_OK) {
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
drm_mode->hdisplay,
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
!acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.crc_window.update_win = true;
- acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+ acrtc->dm_irq_params.window_param.update_win = true;
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
crc_rd_wrk->crtc = crtc;
spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
-#include <nvif/cla06f.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bios.h"
#include "nouveau_ioctl.h"
#include "nouveau_abi16.h"
-#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
#include "nouveau_usif.h"
nouveau_accel_ce_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
+ u64 runm;
int ret = 0;
/* Allocate channel that has access to a (preferably async) copy
* engine, to use for TTM buffer moves.
*/
- if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- ret = nouveau_channel_new(drm, device,
- nvif_fifo_runlist_ce(device), 0,
- true, &drm->cechan);
- } else
- if (device->info.chipset >= 0xa3 &&
- device->info.chipset != 0xaa &&
- device->info.chipset != 0xac) {
- /* Prior to Kepler, there's only a single runlist, so all
- * engines can be accessed from any channel.
- *
- * We still want to use a separate channel though.
- */
- ret = nouveau_channel_new(drm, device, NvDmaFB, NvDmaTT, false,
- &drm->cechan);
+ runm = nvif_fifo_runlist_ce(device);
+ if (!runm) {
+ NV_DEBUG(drm, "no ce runlist\n");
+ return;
}
+ ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
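The chipset-specific branches collapse because channel allocation is now keyed on a runlist mask: nvif_fifo_runlist_ce() returns the set of runlists exposing a copy engine, and an empty mask simply means no usable engine. A sketch of the new call shape, with argument roles inferred from this diff rather than from the header:

    int ret;
    u64 runm = nvif_fifo_runlist_ce(device);

    if (!runm)
        return; /* no copy-engine runlist on this GPU */

    /* (drm, device, priv?, runlist mask, vram handle, gart handle, out) */
    ret = nouveau_channel_new(drm, device, false, runm,
                              NvDmaFB, NvDmaTT, &drm->cechan);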
nouveau_accel_gr_init(struct nouveau_drm *drm)
{
struct nvif_device *device = &drm->client.device;
- u32 arg0, arg1;
+ u64 runm;
int ret;
- if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE)
- return;
-
/* Allocate channel that has access to the graphics engine. */
- if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
- arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
- arg1 = 1;
- } else {
- arg0 = NvDmaFB;
- arg1 = NvDmaTT;
+ runm = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR);
+ if (!runm) {
+ NV_DEBUG(drm, "no gr runlist\n");
+ return;
}
- ret = nouveau_channel_new(drm, device, arg0, arg1, false,
- &drm->channel);
+ ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_gr_fini(drm);
nouveau_accel_gr_fini(drm);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
+ nouveau_channels_fini(drm);
}
static void
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
+ case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
ret = nvc0_fence_create(drm);
break;
nouveau_hwmon_init(dev);
nouveau_svm_init(drm);
nouveau_dmem_init(drm);
- nouveau_fbcon_init(dev);
nouveau_led_init(dev);
if (nouveau_pmops_runtime()) {
}
nouveau_led_fini(dev);
- nouveau_fbcon_fini(dev);
nouveau_dmem_fini(drm);
nouveau_svm_fini(drm);
nouveau_hwmon_fini(dev);
if (ret)
goto fail_drm_dev_init;
+ if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
+ drm_fbdev_generic_setup(drm_dev, 8);
+ else
+ drm_fbdev_generic_setup(drm_dev, 32);
+
quirk_broken_nv_runpm(pdev);
return 0;
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
- NV_DEBUG(drm, "suspending console...\n");
- nouveau_fbcon_set_suspend(dev, 1);
NV_DEBUG(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev, runtime);
if (ret)
if (dev->mode_config.num_crtc) {
NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
- NV_DEBUG(drm, "resuming console...\n");
- nouveau_fbcon_set_suspend(dev, 0);
}
nouveau_led_resume(dev);
DRM_DEBUG_DRIVER("... tv_disable : %d\n", nouveau_tv_disable);
DRM_DEBUG_DRIVER("... ignorelid : %d\n", nouveau_ignorelid);
DRM_DEBUG_DRIVER("... duallink : %d\n", nouveau_duallink);
- DRM_DEBUG_DRIVER("... nofbaccel : %d\n", nouveau_nofbaccel);
DRM_DEBUG_DRIVER("... config : %s\n", nouveau_config);
DRM_DEBUG_DRIVER("... debug : %s\n", nouveau_debug);
DRM_DEBUG_DRIVER("... noaccel : %d\n", nouveau_noaccel);