S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/amd/
F: include/linux/amd-iommu.h
-R: Stanislav Fomichev <sdf@google.com>
+R: Stanislav Fomichev <sdf@fomichev.me>
BPF [SECURITY & LSM] (Security Audit and Enforcement using BPF)
S: Maintained
F: Documentation/bpf/prog_lsm.rst
F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
+F: kernel/trace/bpf_trace.c
F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
CLANG CONTROL FLOW INTEGRITY SUPPORT
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
F: Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml
F: drivers/gpu/drm/panel/panel-ilitek-ili9805.c
+DRM DRIVER FOR ILITEK ILI9806E PANELS
+S: Maintained
+F: drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+
DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS
S: Maintained
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
F: drivers/gpu/drm/panel/panel-lg-sw43408.c
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/vkms.rst
+F: drivers/gpu/drm/ci/xfails/vkms*
F: drivers/gpu/drm/vkms/
DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
DRM DRIVERS FOR VC4
S: Supported
-T: git git://github.com/anholt/linux
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
F: drivers/gpu/drm/vc4/
DRM PANEL DRIVERS
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
EXEC & BINFMT API, ELF
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
F: drivers/net/ethernet/nvidia/*
FORTIFY_SOURCE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: include/linux/platform_data/gsc_hwmon.h
GCC PLUGINS
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: drivers/input/touchscreen/resistive-adc-touch.c
GENERIC STRING LIBRARY
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
F: block/partitions/efi.*
HABANALABS PCI DRIVER
S: Supported
C: irc://irc.oftc.net/dri-devel
-T: git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git
+T: git https://github.com/HabanaAI/drivers.accel.habanalabs.kernel.git
F: Documentation/ABI/testing/debugfs-driver-habanalabs
F: Documentation/ABI/testing/sysfs-driver-habanalabs
F: drivers/accel/habanalabs/
F: drivers/gpu/drm/i915/display/
F: drivers/gpu/drm/xe/display/
F: drivers/gpu/drm/xe/compat-i915-headers
+F: include/drm/intel/
INTEL DRM I915 DRIVER (Meteor Lake, DG2 and older excluding Poulsbo, Moorestown and derivative)
Q: http://patchwork.freedesktop.org/project/intel-gfx/
B: https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
C: irc://irc.oftc.net/intel-gfx
-T: git git://anongit.freedesktop.org/drm-intel
+T: git https://gitlab.freedesktop.org/drm/i915/kernel.git
F: Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
F: Documentation/gpu/i915.rst
F: drivers/gpu/drm/ci/xfails/i915*
F: drivers/gpu/drm/i915/
-F: include/drm/i915*
+F: include/drm/intel/
F: include/uapi/drm/i915_drm.h
INTEL DRM XE DRIVER (Lunar Lake and newer)
S: Supported
W: https://drm.pages.freedesktop.org/intel-docs/
F: Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
F: Documentation/gpu/xe/
F: drivers/gpu/drm/xe/
-F: include/drm/xe*
+F: include/drm/intel/
F: include/uapi/drm/xe_drm.h
INTEL ETHERNET DRIVERS
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/intel/
INTEL IPU3 CSI-2 CIO2 DRIVER
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: drivers/iommu/dma-iommu.c
F: drivers/iommu/dma-iommu.h
F: drivers/iommu/iova.c
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
F: Documentation/devicetree/bindings/iommu/
F: Documentation/userspace-api/iommu.rst
F: drivers/iommu/
F: usr/
KERNEL HARDENING (not covered by other areas)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
KVM PARAVIRT (KVM/paravirt)
S: Supported
LEAKING_ADDRESSES
-R: Kees Cook <keescook@chromium.org>
+R: Kees Cook <kees@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: arch/powerpc/platforms/83xx/
LINUX KERNEL DUMP TEST MODULE (LKDTM)
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Maintained
F: drivers/misc/lkdtm/*
F: tools/testing/selftests/lkdtm/*
F: drivers/media/usb/dvb-usb-v2/lmedm04*
LOADPIN SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/LoadPin.rst
F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
S: Maintained
PENSANDO ETHERNET DRIVERS
S: Supported
F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
PROC SYSCTL
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
F: drivers/net/pse-pd/
PSTORE FILESYSTEM
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
F: Documentation/devicetree/bindings/sound/qcom,*
F: drivers/media/cec/platform/seco/seco-cec.h
SECURE COMPUTING
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
S: Maintained
-W: https://gitlab.com/jarkkojs/linux-tpmdd-test
+W: https://codeberg.org/jarkko/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
F: include/uapi/linux/ublk_cmd.h
UBSAN
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Maintained
W: http://www.linux-mm.org
F: include/linux/yam.h
YAMA SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
+M: Kees Cook <kees@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/admin-guide/LSM/Yama.rst
depends on DRM && KUNIT && MMU
select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
- depends on DRM && !FRAMEBUFFER_CONSOLE
- select DRM_KMS_HELPER
+ depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
select FONT_SUPPORT
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs.
This is unsafe and should not be enabled on a production build.
If in doubt, say "N".
+config DRM_PANIC_SCREEN
+ string "Panic screen formatter"
+ default "user"
+ depends on DRM_PANIC
+ help
+	  This option enables choosing what will be displayed when a kernel
+	  panic occurs: either "user", a short message telling the user to
+	  reboot the system, or "kmsg", which will display the last lines of
+	  kmsg.
+	  This can also be overridden by the drm.panic_screen=xxxx kernel
+	  parameter, or by writing to the /sys/module/drm/parameters/panic_screen
+	  sysfs entry. The default is "user".
+
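As a quick illustration of the runtime override described in the help text above, here is a minimal user-space sketch (assuming DRM_PANIC_SCREEN is enabled and the parameter is writable by the caller) that switches the panic screen to "kmsg"; booting with drm.panic_screen=kmsg has the same effect:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Module parameter exposed by DRM_PANIC_SCREEN (see help text above) */
	int fd = open("/sys/module/drm/parameters/panic_screen", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "kmsg", 4) != 4) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}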
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
default n
depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
+ select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
config DRM_WERROR
bool "Compile the drm subsystem with warnings as errors"
depends on DRM && EXPERT
+ depends on !WERROR
default n
help
A kernel build should not cause any compiler warnings, and this
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
GC_HWIP, false,
&rlcg_flag)) {
- ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
+ ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
GC_HWIP, true,
&rlcg_flag)) {
- amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
+ amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_asic_pre_asic_init(adev);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[40];
int err;
const struct gpu_info_firmware_header_v1_0 *hdr;
break;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ "amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
- "Failed to get gpu_info firmware \"%s\"\n",
- fw_name);
+ "Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
+ chip_name);
goto out;
}
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
+ mutex_init(&adev->virt.rlcg_reg_lock);
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
shadow = vmbo->shadow;
/* No need to recover an evicted BO */
- if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ if (!shadow->tbo.resource ||
+ shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu_device pointer
- * @from_hypervisor: request from hypervisor
+ * @reset_context: amdgpu reset context pointer
*
 * Do VF FLR and reinitialize the ASIC.
 * Return 0 on success, an error code on failure.
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
- bool from_hypervisor)
+ struct amdgpu_reset_context *reset_context)
{
int r;
struct amdgpu_hive_info *hive = NULL;
- int retry_limit = 0;
-
-retry:
- amdgpu_amdkfd_pre_reset(adev);
- amdgpu_device_stop_pending_resets(adev);
-
- if (from_hypervisor)
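+ 	/* A host-initiated FLR is flagged via AMDGPU_HOST_FLR: signal that
+ 	 * the VF is ready, wait for the host to finish the reset, then
+ 	 * request full GPU access. Otherwise the VF triggers the reset itself.
+ 	 */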
+ if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+ amdgpu_virt_ready_to_reset(adev);
+ amdgpu_virt_wait_reset(adev);
+ clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
r = amdgpu_virt_request_full_gpu(adev, true);
- else
+ } else {
r = amdgpu_virt_reset_gpu(adev);
+ }
if (r)
return r;
+
amdgpu_ras_set_fed(adev, false);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
- goto error;
+ return r;
amdgpu_virt_init_data_exchange(adev);
/* now we are okay to resume SMC/CP/SDMA */
r = amdgpu_device_ip_reinit_late_sriov(adev);
if (r)
- goto error;
+ return r;
hive = amdgpu_get_xgmi_hive(adev);
/* Update PSP FW topology after reset */
if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
r = amdgpu_xgmi_update_topology(hive, adev);
-
if (hive)
amdgpu_put_xgmi_hive(hive);
+ if (r)
+ return r;
- if (!r) {
- r = amdgpu_ib_ring_tests(adev);
-
- amdgpu_amdkfd_post_reset(adev);
- }
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ return r;
-error:
- if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
amdgpu_inc_vram_lost(adev);
r = amdgpu_device_recover_vram(adev);
}
- amdgpu_virt_release_full_gpu(adev, true);
+ if (r)
+ return r;
- if (AMDGPU_RETRY_SRIOV_RESET(r)) {
- if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
- retry_limit++;
- goto retry;
- } else
- DRM_ERROR("GPU reset retry is beyond the retry limit\n");
- }
+ /* need to be called during full access so we can't do it later like
+ * bare-metal does.
+ */
+ amdgpu_amdkfd_post_reset(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
- return r;
+ 	/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
+ amdgpu_ras_resume(adev);
+ return 0;
}
/**
dev_info(adev->dev, "GPU mode1 reset\n");
+ /* Cache the state before bus master disable. The saved config space
+ * values are used in other cases like restore after mode-2 reset.
+ */
+ amdgpu_device_cache_pci_state(adev->pdev);
+
/* disable BM */
pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
-
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
ret = amdgpu_dpm_mode1_reset(adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
amdgpu_reset_reg_dumps(tmp_adev);
+ dev_info(tmp_adev->dev, "Dumping IP State\n");
/* Trigger ip dump before we reset the asic */
for (i = 0; i < tmp_adev->num_ip_blocks; i++)
if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
tmp_adev->ip_blocks[i].version->funcs
->dump_ip_state((void *)tmp_adev);
+ dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
}
reset_context->reset_device_list = device_list_handle;
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
* Special case: RAS triggered and full reset isn't supported
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
- if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
if (adev->shutdown)
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
/*
 * Mark these ASICs to be reset as untracked first
r, adev_to_drm(tmp_adev)->unique);
tmp_adev->asic_reset_res = r;
}
-
- if (!amdgpu_sriov_vf(tmp_adev))
- /*
- * Drop all pending non scheduler resets. Scheduler resets
- * were already dropped during drm_sched_stop
- */
- amdgpu_device_stop_pending_resets(tmp_adev);
}
	/* Actual ASIC resets if needed. */
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ r = amdgpu_device_reset_sriov(adev, reset_context);
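+ 		/* Retry a failed VF reset a bounded number of times; full GPU
+ 		 * access must be released before each retry.
+ 		 */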
+ if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
+ amdgpu_virt_release_full_gpu(adev, true);
+ goto retry;
+ }
if (r)
adev->asic_reset_res = r;
-
- /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
- amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /*
+		 * Drop any pending non-scheduler resets queued before reset is done.
+ * Any reset scheduled after this point would be valid. Scheduler resets
+ * were already dropped during drm_sched_stop and no new ones can come
+ * in before drm_sched_start.
+ */
+ amdgpu_device_stop_pending_resets(tmp_adev);
+ }
+
skip_hw_reset:
	/* Post ASIC reset for all devs. */
adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
- if (amdgpu_passthrough(adev) &&
+ if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
adev->nbio.funcs->clear_doorbell_interrupt)
adev->nbio.funcs->clear_doorbell_interrupt(adev);
struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
- struct amdgpu_hive_info *hive;
- int hive_ras_recovery = 0;
- struct amdgpu_ras *ras;
	/* PCI error slot reset should be skipped during RAS recovery */
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive) {
- hive_ras_recovery = atomic_read(&hive->ras_recovery);
- amdgpu_put_xgmi_hive(hive);
- }
- ras = amdgpu_ras_get_context(adev);
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
- ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
+ amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
DRM_INFO("PCI error: slot reset callback!!\n");
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+/**
+ * amdgpu_device_get_gang - return a reference to the current gang
+ * @adev: amdgpu_device pointer
+ *
+ * Returns: A new reference to the current gang leader.
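+ * The caller is responsible for putting the returned fence with
+ * dma_fence_put() once done with it.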
+ */
+struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
+{
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+ return fence;
+}
+
/**
* amdgpu_device_switch_gang - switch to a new gang
* @adev: amdgpu_device pointer
do {
dma_fence_put(old);
- rcu_read_lock();
- old = dma_fence_get_rcu_safe(&adev->gang_submit);
- rcu_read_unlock();
-
+ old = amdgpu_device_get_gang(adev);
if (old == gang)
break;
memset(&bp, 0, sizeof(bp));
*obj = NULL;
+ flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bp.size = size;
bp.byte_align = alignment;
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
- abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+ !amdgpu_vm_is_bo_always_valid(vm, abo))
return -EPERM;
r = amdgpu_bo_reserve(abo, false);
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
AMDGPU_GEM_CREATE_ENCRYPTED |
+ AMDGPU_GEM_CREATE_GFX12_DCC |
AMDGPU_GEM_CREATE_DISCARDABLE))
return -EINVAL;
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
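+ 	/* GFX12 DCC is only valid on GC 12.0.0 and newer, and only for VRAM placements */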
+ if ((flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+ ((amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) ||
+ !(args->in.domains & AMDGPU_GEM_DOMAIN_VRAM)))
+ return -EINVAL;
+
if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
return -EINVAL;
if (flags & AMDGPU_VM_PAGE_WRITEABLE)
pte_flag |= AMDGPU_PTE_WRITEABLE;
if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
+ pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
if (flags & AMDGPU_VM_PAGE_NOALLOC)
pte_flag |= AMDGPU_PTE_NOALLOC;
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
ttm_bo_pin(&bo->tbo);
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
- } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
- unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
- domain = amdgpu_mem_type_to_domain(res->mem_type);
- switch (domain) {
- case AMDGPU_GEM_DOMAIN_VRAM:
+ switch (res->mem_type) {
+ case TTM_PL_VRAM:
stats->vram += size;
- if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ if (amdgpu_res_cpu_visible(adev, res))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
break;
- case AMDGPU_GEM_DOMAIN_GTT:
+ case TTM_PL_TT:
stats->gtt += size;
if (shared)
stats->gtt_shared += size;
break;
- case AMDGPU_GEM_DOMAIN_CPU:
+ case TTM_PL_SYSTEM:
default:
stats->cpu += size;
if (shared)
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
stats->requested_visible_vram += size;
- if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+ if (res->mem_type != TTM_PL_VRAM) {
stats->evicted_vram += size;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
stats->evicted_visible_vram += size;
u64 size;
if (dma_resv_trylock(bo->tbo.base.resv)) {
- unsigned int domain;
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
- switch (domain) {
- case AMDGPU_GEM_DOMAIN_VRAM:
+ switch (bo->tbo.resource->mem_type) {
+ case TTM_PL_VRAM:
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
break;
- case AMDGPU_GEM_DOMAIN_GTT:
+ case TTM_PL_TT:
placement = "GTT";
break;
- case AMDGPU_GEM_DOMAIN_CPU:
+ case AMDGPU_PL_GDS:
+ placement = "GDS";
+ break;
+ case AMDGPU_PL_GWS:
+ placement = "GWS";
+ break;
+ case AMDGPU_PL_OA:
+ placement = "OA";
+ break;
+ case AMDGPU_PL_PREEMPT:
+ placement = "PREEMPTIBLE";
+ break;
+ case AMDGPU_PL_DOORBELL:
+ placement = "DOORBELL";
+ break;
+ case TTM_PL_SYSTEM:
default:
placement = "CPU";
break;
adev->virt.autoload_ucode_id = 0;
break;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
ret = psp_init_cap_microcode(psp, ucode_prefix);
ret &= psp_init_ta_microcode(psp, ucode_prefix);
break;
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
bool ret = false;
int i;
- if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
return false;
db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
}
}
+ static bool psp_err_warn(struct psp_context *psp)
+ {
+ struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+ /* This response indicates reg list is already loaded */
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+ cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+ cmd->resp.status == TEE_ERROR_CANCEL)
+ return false;
+
+ return true;
+ }
+
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
dev_warn(psp->adev->dev,
"failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
- dev_warn(psp->adev->dev,
- "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
- psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
- psp->cmd_buf_mem->resp.status);
+ if (psp_err_warn(psp))
+ dev_warn(
+ psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
* return failure to stop the VF from initializing.
* Also return failure in case of timeout
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 14):
return true;
default:
return false;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
+ if (WARN_ON(!hive))
+ return;
+
list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
struct psp_xgmi_topology_info *mirror_top_info;
int j;
(psp->xgmi_context.supports_extended_data &&
get_extended_data) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
- IP_VERSION(13, 0, 6);
+ IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 14);
bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
*type = GFX_FW_TYPE_DMUB;
break;
case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
+ case AMDGPU_UCODE_ID_SDMA_RS64:
*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
break;
case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
return 0;
- if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
0x0036003C;
if (psp->sos.fw_version < supp_vers)
int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *sos_hdr;
const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
uint8_t *ucode_array_start_addr;
int fw_index = 0;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
{
const struct common_firmware_header *hdr;
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
int err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
struct amdgpu_firmware_info *info = NULL;
int err = 0;
return -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret, idx;
- char fw_name[100];
const struct firmware *usbc_pd_fw;
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
- ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
if (ret)
goto fail;
amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
rel_buf:
- release_firmware(usbc_pd_fw);
+ amdgpu_ucode_release(&usbc_pd_fw);
fail:
if (ret) {
dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ktime_t *vblank_time,
bool in_vblank_irq)
{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = crtc->index;
struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!READ_ONCE(vblank->enabled)) {
return 0;
}
afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
+
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
+ struct drm_gem_object *obj;
int r;
if (!old_state->fb)
return;
- rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+ obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
# SPDX-License-Identifier: MIT
+# Copyright © 2019-2024 Advanced Micro Devices, Inc. All rights reserved.
+
menu "Display Engine Configuration"
depends on DRM && DRM_AMDGPU
depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && (!ARM64 || !CC_IS_CLANG)
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
case 8100000:
link_rate = LINK_RATE_HIGH3; // Rate_9 (HBR3)- 8.10 Gbps/Lane
break;
+ case 10000000:
+ link_rate = LINK_RATE_UHBR10; // UHBR10 - 10.0 Gbps/Lane
+ break;
default:
link_rate = LINK_RATE_UNKNOWN;
break;
bool dp_is_lttpr_present(struct dc_link *link)
{
+ /* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
}
/* in DP compliance test, DPR-120 may have
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
- enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ enum dc_link_rate lttpr_max_link_rate = LINK_RATE_UNKNOWN;
+
+ switch (link->dpcd_caps.lttpr_caps.max_link_rate) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+ break;
+ default:
+ // Assume all LTTPRs support up to HBR3 to improve misbehaving sink interop
+ lttpr_max_link_rate = LINK_RATE_HIGH3;
+ }
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
struct dc_link_settings *cur,
enum link_training_result training_result)
{
- uint8_t cur_idx = 0, next_idx;
+ uint32_t cur_idx = 0, next_idx;
bool found = false;
if (training_result == LINK_TRAINING_ABORT)
memset(link_setting, 0, sizeof(*link_setting));
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
- */
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
+ if (dc_is_dp_signal(stream->signal) &&
+ link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
*link_setting = link->preferred_link_setting;
- return true;
- }
-
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ } else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
decide_mst_link_settings(link, link_setting);
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* enable edp link optimization for DSC eDP case */
} else {
edp_decide_link_settings(link, link_setting, req_bw);
}
+ } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+ link_setting->lane_count = LANE_COUNT_FOUR;
+ link_setting->link_rate = LINK_RATE_HIGH3;
} else {
decide_dp_link_settings(link, link_setting, req_bw);
}
return false;
}
- if (dp_is_lttpr_present(link))
+ if (dp_is_lttpr_present(link)) {
configure_lttpr_mode_transparent(link);
+ // Echo TOTAL_LTTPR_CNT back downstream
+ core_link_write_dpcd(
+ link,
+ DP_TOTAL_LTTPR_CNT,
+ &link->dpcd_caps.lttpr_caps.phy_repeater_cnt,
+ sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
+ }
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
struct link_encoder *link_enc = NULL;
-
+ bool is_uhbr13_5_supported = true;
link_enc = link_enc_cfg_get_link_enc(link);
ASSERT(link_enc);
max_link_cap.link_spread =
link->reported_link_cap.link_spread;
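+	/* UHBR13.5 is usable only if the sink, the cable and any LTTPRs all support it */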
+ if (!link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ is_uhbr13_5_supported = false;
+
/* Lower link settings based on cable attributes
* Cable ID is a DP2 feature to identify max certified link rate that
* a cable can carry. The cable identification method requires both
cable_max_link_rate = get_cable_max_link_rate(link);
if (!link->dc->debug.ignore_cable_id &&
- cable_max_link_rate != LINK_RATE_UNKNOWN &&
- cable_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = cable_max_link_rate;
+ cable_max_link_rate != LINK_RATE_UNKNOWN) {
+ if (cable_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = cable_max_link_rate;
+
+ if (!link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY &&
+ link->dpcd_caps.cable_id.bits.CABLE_TYPE >= 2)
+ is_uhbr13_5_supported = false;
+ }
/* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
if (lttpr_max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = lttpr_max_link_rate;
+ if (!link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ is_uhbr13_5_supported = false;
+
DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
__func__,
max_link_cap.lane_count,
max_link_cap.link_rate);
}
+ if (max_link_cap.link_rate == LINK_RATE_UHBR13_5 &&
+ !is_uhbr13_5_supported)
+ max_link_cap.link_rate = LINK_RATE_UHBR10;
+
if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
link->dc->debug.disable_uhbr)
max_link_cap.link_rate = LINK_RATE_HIGH3;
__SMU_DUMMY_MAP(SetSoftMinVpe), \
__SMU_DUMMY_MAP(GetMetricsVersion), \
__SMU_DUMMY_MAP(EnableUCLKShadow), \
- __SMU_DUMMY_MAP(RmaDueToBadPageThreshold),\
+ __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
+ __SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState),
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
drm_connector_helper_add(connector,
&panel_bridge_connector_helper_funcs);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
- struct drm_bridge **bridge = res;
+ struct drm_bridge *bridge = *(struct drm_bridge **)res;
- drm_panel_bridge_remove(*bridge);
+ if (!bridge)
+ return;
+
+ drm_bridge_remove(bridge);
}
/**
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
* mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
- * interfaces.
- *
- * Drivers that support a dumb buffer with a virtual address and mmap support,
- * should try out the generic fbdev emulation using drm_fbdev_generic_setup().
- * It will automatically set up deferred I/O if the driver requires a shadow
- * buffer.
+ * interfaces. Drivers that use one of the shared memory managers, TTM, SHMEM,
+ * DMA, should instead use the corresponding fbdev emulation.
*
* Existing fbdev implementations should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
* mmap page writes.
- *
- * Deferred I/O is not compatible with SHMEM. Such drivers should request an
- * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
if (!info)
return ERR_PTR(-ENOMEM);
+ if (!drm_leak_fbdev_smem)
+ info->flags |= FBINFO_HIDE_SMEM_START;
+
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret)
goto err_release;
info = fb_helper->info;
info->var.pixclock = 0;
- if (!drm_leak_fbdev_smem)
- info->flags |= FBINFO_HIDE_SMEM_START;
-
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
* register the fbdev emulation instance in kernel_fb_helper_list. */
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
return 0;
}
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area);
+
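+/* Deferred I/O maps the GEM object's pages into user space directly, so
+ * mirror the DMA object's kernel-mapping attributes: write-combined unless
+ * the buffer was allocated non-coherent, then hand off to the deferred-I/O
+ * page-tracking mmap.
+ */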
+static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+ if (!dma->map_noncoherent)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ return fb_deferred_io_mmap(info, vma);
+}
+
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
if (!fb_helper->dev)
return;
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
kfree(fb_helper);
}
-static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct drm_fb_helper *fb_helper = info->par;
-
- return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
-}
-
static const struct fb_ops drm_fbdev_dma_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
.fb_mmap = drm_fbdev_dma_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- if (drm_WARN_ON(dev, fb->funcs->dirty)) {
- ret = -ENODEV; /* damage handling not supported; use generic emulation */
- goto err_drm_client_buffer_delete;
- }
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
}
fb_helper->buffer = buffer;
- fb_helper->fb = buffer->fb;
+ fb_helper->fb = fb;
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
info->flags |= FBINFO_READS_FAST; /* signal caching */
info->screen_size = sizes->surface_height * fb->pitches[0];
info->screen_buffer = map.vaddr;
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
info->fix.smem_len = info->screen_size;
+ /* deferred I/O */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
+
return 0;
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
return ret;
}
+static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
.fb_probe = drm_fbdev_dma_helper_fb_probe,
+ .fb_dirty = drm_fbdev_dma_helper_fb_dirty,
};
/*
DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO KUN */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
#endif
+MODULE_DESCRIPTION("Quirks for non-normal panel orientation");
MODULE_LICENSE("Dual MIT/GPL");
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
dp->plat_data.dev_type = EXYNOS_DP;
- dp->plat_data.power_on_start = exynos_dp_poweron;
+ dp->plat_data.power_on = exynos_dp_poweron;
dp->plat_data.power_off = exynos_dp_poweroff;
dp->plat_data.attach = exynos_dp_bridge_attach;
dp->plat_data.get_modes = exynos_dp_get_modes;
static void exynos_dp_remove(struct platform_device *pdev)
{
- struct exynos_dp_device *dp = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &exynos_dp_ops);
- analogix_dp_remove(dp->adp);
}
static int exynos_dp_suspend(struct device *dev)
.remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
- .owner = THIS_MODULE,
.pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
if (ret < 0)
goto err_deinit;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_dma_setup(drm, 32);
return 0;
of_node_put(private->comp_node[i]);
}
+ static void mtk_drm_shutdown(struct platform_device *pdev)
+ {
+ struct mtk_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(private->drm);
+ }
+
static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove_new = mtk_drm_remove,
+ .shutdown = mtk_drm_shutdown,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nvif_outp *outp = &nv_encoder->outp;
- if (!nv50_audio_supported(encoder) || !drm_detect_monitor_audio(nv_connector->edid))
+ if (!nv50_audio_supported(encoder) || !nv_connector->base.display_info.has_audio)
return;
mutex_lock(&drm->audio.lock);
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
nv_encoder->dcb->type != DCB_OUTPUT_LVDS &&
- drm_detect_monitor_audio(nv_connector->edid))
+ nv_connector->base.display_info.has_audio)
hda = true;
if (!nvif_outp_acquired(outp))
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (disp->disp->object.oclass != NV50_DISP &&
- drm_detect_hdmi_monitor(nv_connector->edid))
+ nv_connector->base.display_info.is_hdmi)
nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
if (nv_encoder->outp.or.link & 1) {
*/
if (mode->clock >= 165000 &&
nv_encoder->dcb->duallink_possible &&
- !drm_detect_hdmi_monitor(nv_connector->edid))
+ !nv_connector->base.display_info.is_hdmi)
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
} else {
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
nv50_mstm_fini(nouveau_encoder(encoder));
}
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
}
struct panel_simple {
struct drm_panel base;
- bool enabled;
-
- bool prepared;
ktime_t unprepared_time;
{
struct panel_simple *p = to_panel_simple(panel);
- if (!p->enabled)
- return 0;
-
if (p->desc->delay.disable)
msleep(p->desc->delay.disable);
- p->enabled = false;
-
return 0;
}
static int panel_simple_unprepare(struct drm_panel *panel)
{
- struct panel_simple *p = to_panel_simple(panel);
int ret;
- /* Unpreparing when already unprepared is a no-op */
- if (!p->prepared)
- return 0;
-
pm_runtime_mark_last_busy(panel->dev);
ret = pm_runtime_put_autosuspend(panel->dev);
if (ret < 0)
return ret;
- p->prepared = false;
return 0;
}
static int panel_simple_prepare(struct drm_panel *panel)
{
- struct panel_simple *p = to_panel_simple(panel);
int ret;
- /* Preparing when already prepared is a no-op */
- if (p->prepared)
- return 0;
-
ret = pm_runtime_get_sync(panel->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(panel->dev);
return ret;
}
- p->prepared = true;
-
return 0;
}
{
struct panel_simple *p = to_panel_simple(panel);
- if (p->enabled)
- return 0;
-
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- p->enabled = true;
-
return 0;
}
if (!panel)
return -ENOMEM;
- panel->enabled = false;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
return err;
}
-static void panel_simple_remove(struct device *dev)
+static void panel_simple_shutdown(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_remove(&panel->base);
+ /*
+ * NOTE: the following two calls don't really belong here. It is the
+ * responsibility of a correctly written DRM modeset driver to call
+ * drm_atomic_helper_shutdown() at shutdown time and that should
+ * cause the panel to be disabled / unprepared if needed. For now,
+ * however, we'll keep these calls due to the sheer number of
+ * different DRM modeset drivers used with panel-simple. The fact that
+ * we're calling these and _also_ the drm_atomic_helper_shutdown()
+ * will try to disable/unprepare means that we can get a warning about
+ * trying to disable/unprepare an already disabled/unprepared panel,
+ * but that's something we'll have to live with until we've confirmed
+ * that all DRM modeset drivers are properly calling
+ * drm_atomic_helper_shutdown().
+ */
drm_panel_disable(&panel->base);
drm_panel_unprepare(&panel->base);
-
- pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_disable(dev);
- if (panel->ddc)
- put_device(&panel->ddc->dev);
}
-static void panel_simple_shutdown(struct device *dev)
+static void panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ drm_panel_remove(&panel->base);
+ panel_simple_shutdown(dev);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+ if (panel->ddc)
+ put_device(&panel->ddc->dev);
}
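
For reference, the pattern the NOTE above asks modeset drivers to adopt is the one added elsewhere in this series (see mtk_drm_shutdown and shmob_drm_shutdown); a minimal sketch with illustrative names:

#include <drm/drm_atomic_helper.h>
#include <linux/platform_device.h>

/* Hypothetical driver hook: shutting down the atomic state disables all
 * outputs, which in turn disables/unprepares any attached panels.
 */
static void example_drm_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_atomic_helper_shutdown(drm);
}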
static const struct drm_display_mode ampire_am_1280800n3tzqw_t00h_mode = {
.vfront_porch = { 3, 5, 10 },
.vback_porch = { 2, 5, 10 },
.vsync_len = { 5, 5, 5 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc koe_tx26d202vm0bwa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode lincolntech_lcd185_101ct_mode = {
+ .clock = 155127,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 128,
+ .hsync_end = 1920 + 128 + 20,
+ .htotal = 1920 + 128 + 20 + 12,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 19,
+ .vsync_end = 1200 + 19 + 4,
+ .vtotal = 1200 + 19 + 4 + 20,
+};
+
+static const struct panel_desc lincolntech_lcd185_101ct = {
+ .modes = &lincolntech_lcd185_101ct_mode,
+ .bpc = 8,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing logictechno_lt161010_2nh_timing = {
.pixelclock = { 26400000, 33300000, 46800000 },
.hactive = { 800, 800, 800 },
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode microtips_mf_101hiebcaf0_c_mode = {
+ .clock = 150275,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 32,
+ .hsync_end = 1920 + 32 + 52,
+ .htotal = 1920 + 32 + 52 + 24,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 24,
+ .vsync_end = 1200 + 24 + 8,
+ .vtotal = 1200 + 24 + 8 + 3,
+};
+
+static const struct panel_desc microtips_mf_101hiebcaf0_c = {
+	.modes = &microtips_mf_101hiebcaf0_c_mode,
+ .bpc = 8,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct drm_display_mode microtips_mf_103hieb0ga0_mode = {
+ .clock = 93301,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 72,
+ .hsync_end = 1920 + 72 + 72,
+ .htotal = 1920 + 72 + 72 + 72,
+ .vdisplay = 720,
+ .vsync_start = 720 + 3,
+ .vsync_end = 720 + 3 + 3,
+ .vtotal = 720 + 3 + 3 + 2,
+};
+
+static const struct panel_desc microtips_mf_103hieb0ga0 = {
+	.modes = &microtips_mf_103hieb0ga0_mode,
+ .bpc = 8,
+ .num_modes = 1,
+ .size = {
+ .width = 244,
+ .height = 92,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode primeview_pm070wl4_mode = {
+ .clock = 32000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 42,
+ .hsync_end = 800 + 42 + 128,
+ .htotal = 800 + 42 + 128 + 86,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc primeview_pm070wl4 = {
+ .modes = &primeview_pm070wl4_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode qd43003c0_40_mode = {
.clock = 9000,
.hdisplay = 480,
}, {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
+ }, {
+ .compatible = "lincolntech,lcd185-101ct",
+ .data = &lincolntech_lcd185_101ct,
}, {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
.compatible = "logictechno,lttd800480070-l6wh-rt",
.data = &logictechno_lttd800480070_l6wh_rt,
+ }, {
+ .compatible = "microtips,mf-101hiebcaf0",
+	.data = &microtips_mf_101hiebcaf0_c,
+ }, {
+ .compatible = "microtips,mf-103hieb0ga0",
+	.data = &microtips_mf_103hieb0ga0,
}, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
.compatible = "powertip,ph800480t013-idf02",
.data = &powertip_ph800480t013_idf02,
+ }, {
+ .compatible = "primeview,pm070wl4",
+ .data = &primeview_pm070wl4,
}, {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
drm_kms_helper_poll_fini(ddev);
}
+ static void shmob_drm_shutdown(struct platform_device *pdev)
+ {
+ struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&sdev->ddev);
+ }
+
static int shmob_drm_probe(struct platform_device *pdev)
{
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
if (ret < 0)
goto err_modeset_cleanup;
- drm_fbdev_generic_setup(ddev, 16);
+ drm_fbdev_dma_setup(ddev, 16);
return 0;
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
.remove_new = shmob_drm_remove,
+ .shutdown = shmob_drm_shutdown,
.driver = {
.name = "shmob-drm",
.of_match_table = of_match_ptr(shmob_drm_of_table),