S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/pm/powerplay/
AMD SEATTLE DEVICE TREE SUPPORT
AMD SENSOR FUSION HUB DRIVER
-M: Sandeep Singh <sandeep.singh@amd.com>
+M: Basavaraj Natikar <basavaraj.natikar@amd.com>
S: Maintained
F: Documentation/hid/amd-sfh*
F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
F: arch/arm/mach-gemini/
+F: drivers/crypto/gemini/
F: drivers/net/ethernet/cortina/
F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
ARM/CZ.NIC TURRIS SUPPORT
S: Maintained
W: https://www.turris.cz/
F: Documentation/ABI/testing/debugfs-moxtet
F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
F: arch/arm/mach-ixp4xx/
F: drivers/clocksource/timer-ixp4xx.c
+F: drivers/crypto/ixp4xx_crypto.c
F: drivers/gpio/gpio-ixp4xx.c
F: drivers/irqchip/irq-ixp4xx.c
F: include/linux/irqchip/irq-ixp4xx.h
F: scripts/clang-tools/
K: \b(?i:clang|llvm)\b
+CLANG CONTROL FLOW INTEGRITY SUPPORT
+S: Supported
+B: https://github.com/ClangBuiltLinux/linux/issues
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/clang/features
+F: include/linux/cfi.h
+F: kernel/cfi.c
+
CLEANCACHE API
F: drivers/video/console/
F: include/linux/console*
+CONTEXT TRACKING
+S: Maintained
+F: kernel/context_tracking.c
+F: include/linux/context_tracking*
+
CONTROL GROUP (CGROUP)
S: Maintained
-F: drivers/platform/x86/dell/dell-wmi.c
+F: drivers/platform/x86/dell/dell-wmi-base.c
+
+DELL WMI HARDWARE PRIVACY SUPPORT
+S: Maintained
+F: drivers/platform/x86/dell/dell-wmi-privacy.c
DELTA ST MEDIA DRIVER
T: git git://linuxtv.org/media_tree.git
F: drivers/media/platform/sti/delta
+DELTA DPS920AB PSU DRIVER
+S: Maintained
+F: Documentation/hwmon/dps920ab.rst
+F: drivers/hwmon/pmbus/dps920ab.c
+
DENALI NAND DRIVER
S: Orphan
F: drivers/gpu/drm/savage/
F: include/uapi/drm/savage_drm.h
+DRM DRIVER FOR SIMPLE FRAMEBUFFERS
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/tiny/simpledrm.c
+
DRM DRIVER FOR SIS VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/sis/
F: Documentation/devicetree/bindings/display/hisilicon/
F: drivers/gpu/drm/hisilicon/
+DRM DRIVER FOR HYPERV SYNTHETIC VIDEO DEVICE
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/hyperv
+
DRM DRIVERS FOR LIMA
S: Maintained
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
F: fs/ecryptfs/
EDAC-AMD64
-S: Maintained
+S: Supported
F: drivers/edac/amd64_edac*
+F: drivers/edac/mce_amd*
EDAC-ARMADA
EROFS FILE SYSTEM
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
F: Documentation/devicetree/bindings/net/qca,ar803x.yaml
F: Documentation/networking/phy.rst
F: drivers/net/mdio/
+F: drivers/net/mdio/acpi_mdio.c
+F: drivers/net/mdio/fwnode_mdio.c
F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
-M: Aymen Sghaier <aymen.sghaier@nxp.com>
+M: Pankaj Gupta <pankaj.gupta@nxp.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt
F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
S: Supported
F: scripts/gdb/
+GEMINI CRYPTO DRIVER
+S: Maintained
+F: drivers/crypto/gemini/
+
GEMTEK FM RADIO RECEIVER DRIVER
S: Maintained
F: drivers/i2c/busses/i2c-icy.c
-IDE SUBSYSTEM
-S: Maintained
-Q: http://patchwork.ozlabs.org/project/linux-ide/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide.git
-F: Documentation/ide/
-F: drivers/ide/
-F: include/linux/ide.h
-
-IDE/ATAPI DRIVERS
-S: Orphan
-F: Documentation/cdrom/ide-cd.rst
-F: drivers/ide/ide-cd*
-
IDEAPAD LAPTOP EXTRAS DRIVER
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
+F: include/linux/net/intel/iidc.h
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
F: Documentation/userspace-api/media/v4l/pixfmt-meta-intel-ipu3.rst
F: drivers/staging/media/ipu3/
+INTEL IXP4XX CRYPTO SUPPORT
+S: Maintained
+F: drivers/crypto/ixp4xx_crypto.c
+
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
S: Maintained
F: arch/x86/include/asm/intel_scu_ipc.h
F: drivers/platform/x86/intel_scu_*
+INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
+S: Maintained
+F: drivers/platform/x86/intel/int3472/
+
INTEL SPEED SELECT TECHNOLOGY
F: include/linux/firmware/intel/stratix10-svc-client.h
INTEL TELEMETRY DRIVER
+M: Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
S: Maintained
S: Maintained
F: drivers/platform/x86/intel-wmi-thunderbolt.c
+INTEL WWAN IOSM DRIVER
+S: Maintained
+F: drivers/net/wwan/iosm/
+
INTEL(R) TRACE HUB
S: Supported
T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/it913x*
+ITE IT66121 HDMI BRIDGE DRIVER
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
+F: drivers/gpu/drm/bridge/ite-it66121.c
+
IVTV VIDEO4LINUX DRIVER
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
+F: tools/testing/selftests/kvm/*/aarch64/
+F: tools/testing/selftests/kvm/aarch64/
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
F: drivers/mailbox/
F: include/linux/mailbox_client.h
F: include/linux/mailbox_controller.h
+F: include/dt-bindings/mailbox/
F: Documentation/devicetree/bindings/mailbox/
MAILBOX ARM MHUv2
MARVELL MV88X3310 PHY DRIVER
S: Maintained
F: drivers/net/phy/marvell10g.c
MEDIA DRIVERS FOR FREESCALE IMX7
S: Maintained
T: git git://linuxtv.org/media_tree.git
S: Supported
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/renesas,csi2.yaml
+F: Documentation/devicetree/bindings/media/renesas,isp.yaml
F: Documentation/devicetree/bindings/media/renesas,vin.yaml
F: drivers/media/platform/rcar-vin/
S: Supported
-F: Documentation/devicetree/bindings/media/atmel-isc.txt
+F: Documentation/devicetree/bindings/media/atmel,isc.yaml
+F: Documentation/devicetree/bindings/media/microchip,xisc.yaml
F: drivers/media/platform/atmel/atmel-isc-base.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: drivers/media/platform/atmel/atmel-isc.h
F: drivers/media/platform/atmel/atmel-sama5d2-isc.c
+F: drivers/media/platform/atmel/atmel-sama7g5-isc.c
F: include/linux/atmel-isc-media.h
MICROCHIP ISI DRIVER
S: Maintained
W: https://github.com/linux-surface/surface-aggregator-module
-C: irc://chat.freenode.net/##linux-surface
+C: irc://irc.libera.chat/linux-surface
F: Documentation/driver-api/surface_aggregator/
F: drivers/platform/surface/aggregator/
F: drivers/platform/surface/surface_acpi_notify.c
F: drivers/media/pci/meye/
F: include/uapi/linux/meye.h
+MOTORCOMM PHY DRIVER
+S: Maintained
+F: drivers/net/phy/motorcomm.c
+
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
S: Orphan
F: Documentation/driver-api/serial/moxa-smartio.rst
F: drivers/net/ethernet/natsemi/natsemi.c
NCR 5380 SCSI DRIVERS
-M: Finn Thain <fthain@telegraphics.com.au>
+M: Finn Thain <fthain@linux-m68k.org>
S: Maintained
W: http://www.iptables.org/
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
+C: irc://irc.libera.chat/netfilter
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
F: include/linux/netfilter*
F: fs/ntfs/
NUBUS SUBSYSTEM
-M: Finn Thain <fthain@telegraphics.com.au>
+M: Finn Thain <fthain@linux-m68k.org>
S: Maintained
F: arch/*/include/asm/nubus.h
S: Maintained
F: drivers/net/dsa/sja1105
+F: drivers/net/pcs/pcs-xpcs-nxp.c
NXP TDA998X DRM DRIVER
F: Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
F: drivers/cpufreq/qcom-cpufreq-nvmem.c
+QUALCOMM CRYPTO DRIVERS
+S: Maintained
+F: drivers/crypto/qce/
+
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
RADEON and AMDGPU DRM DRIVERS
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: include/uapi/linux/rpmsg.h
F: samples/rpmsg/
+REMOTE PROCESSOR MESSAGING (RPMSG) WWAN CONTROL DRIVER
+S: Maintained
+F: drivers/net/wwan/rpmsg_wwan_ctrl.c
+
RENESAS CLOCK DRIVERS
N: riscv
K: riscv
+RISC-V/MICROCHIP POLARFIRE SOC SUPPORT
+S: Supported
+F: drivers/mailbox/mailbox-mpfs.c
+F: drivers/soc/microchip/
+F: include/soc/microchip/mpfs.h
+
RNBD BLOCK DRIVERS
F: drivers/ssb/
F: include/linux/ssb/
+SONY IMX208 SENSOR DRIVER
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/imx208.c
+
SONY IMX214 SENSOR DRIVER
S: Supported
F: drivers/net/pcs/pcs-xpcs.c
+F: drivers/net/pcs/pcs-xpcs.h
F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER
T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
F: drivers/platform/x86/thinkpad_acpi.c
+THINKPAD LMI DRIVER
+S: Maintained
+F: Documentation/ABI/testing/sysfs-class-firmware-attributes
+F: drivers/platform/x86/think-lmi.?
+
THUNDERBOLT DMA TRAFFIC TEST DRIVER
F: include/linux/regulator/
K: regulator_get_optional
+VOLTAGE AND CURRENT REGULATOR IRQ HELPERS
+F: drivers/regulator/irq_helpers.c
+
VRF
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk.git
F: Documentation/core-api/printk-formats.rst
F: lib/test_printf.c
+F: lib/test_scanf.c
F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER
F: include/linux/workqueue.h
F: kernel/workqueue.c
+WWAN DRIVERS
+S: Maintained
+F: drivers/net/wwan/
+F: include/linux/wwan.h
+F: include/uapi/linux/wwan.h
+
X-POWERS AXP288 PMIC DRIVERS
S: Maintained
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
- /**
- * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
- * @obj: GEM BO
- * @vma: Virtual memory area
- *
- * Sets up a userspace mapping of the BO's memory in the given
- * virtual memory area.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
- int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
- {
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- unsigned asize = amdgpu_bo_size(bo);
- int ret;
-
- if (!vma->vm_file)
- return -ENODEV;
-
- if (adev == NULL)
- return -ENODEV;
-
- /* Check for valid size. */
- if (asize < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
- (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- return -EPERM;
- }
- vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
-
- /* prime mmap does not need to check access, so allow here */
- ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
- if (ret)
- return ret;
-
- ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
- drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
-
- return ret;
- }
-
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
unsigned int count;
int r;
- if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
- r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int r;
/* pin buffer into GTT */
- return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ return r;
+
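+ /* The BO may still carry a move fence from a pending
+ * migration; wait for it so the importer never sees memory
+ * in flight, and drop the pin again if the wait fails.
+ */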
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r) {
+ amdgpu_bo_unpin(bo);
+ return r;
+ }
+ }
+ return 0;
}
/**
if (r)
return ERR_PTR(r);
- } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
AMDGPU_GEM_DOMAIN_GTT)) {
return ERR_PTR(-EBUSY);
}
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
- bo->tbo.base.size, attach->dev, dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ bo->tbo.base.size, attach->dev,
+ dir, &sgt);
if (r)
return ERR_PTR(r);
break;
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
if (ticket) {
/* When we get an error here it means that somebody
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
- #include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+ #include <drm/ttm/ttm_range_manager.h>
#include <drm/amdgpu_drm.h>
}
abo = ttm_to_amdgpu_bo(bo);
- switch (bo->mem.mem_type) {
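+ /* SVM BOs are evicted by waiting rather than by moving:
+ * make sure the exclusive fence can signal, then return an
+ * empty placement list so TTM skips the migration.
+ */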
+ if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
+ struct dma_fence *fence;
+ struct dma_resv *resv = &bo->base._resv;
+
+ rcu_read_lock();
+ fence = rcu_dereference(resv->fence_excl);
+ if (fence && !fence->ops->signaled)
+ dma_fence_enable_sw_signaling(fence);
+
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ rcu_read_unlock();
+ return;
+ }
+
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
}
break;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
default:
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
break;
*placement = abo->placement;
}
- /**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
- static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
- {
- struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
- /*
- * Don't verify access for KFD BOs. They don't have a GEM
- * object associated with them.
- */
- if (abo->kfd_bo)
- return 0;
-
- if (amdgpu_ttm_tt_get_usermm(bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
- filp->private_data);
- }
-
/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @bo: buffer object to map
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+ BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
}
/**
- * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
* @adev: amdgpu device
* @src: buffer/address where to read from
* @dst: buffer/address where to write to
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
- if (new_mem->mem_type == TTM_PL_TT) {
+ if (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
return r;
goto out;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT) {
+ (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
- if (old_mem->mem_type == TTM_PL_TT &&
+ if ((old_mem->mem_type == TTM_PL_TT ||
+ old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = ttm_bo_wait_ctx(bo, ctx);
if (r)
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
- static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct drm_mm_node *mm_node = mem->mm_node;
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
switch (mem->mem_type) {
/* system memory */
return 0;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
- /* Only physically contiguous buffers apply. In a contiguous
- * buffer, size of the first mm_node would match the number of
- * pages in ttm_resource.
- */
+
if (adev->mman.aper_base_kaddr &&
- (mm_node->size == mem->num_pages))
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
mem->bus.offset;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_res_cursor cursor;
- amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
- struct hmm_range *range;
- unsigned long timeout;
struct mm_struct *mm;
- unsigned long i;
+ bool readonly;
int r = 0;
mm = bo->notifier.mm;
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
- range = kzalloc(sizeof(*range), GFP_KERNEL);
- if (unlikely(!range)) {
- r = -ENOMEM;
- goto out;
- }
- range->notifier = &bo->notifier;
- range->start = bo->notifier.interval_tree.start;
- range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = HMM_PFN_REQ_FAULT;
- if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= HMM_PFN_REQ_WRITE;
-
- range->hmm_pfns = kvmalloc_array(ttm->num_pages,
- sizeof(*range->hmm_pfns), GFP_KERNEL);
- if (unlikely(!range->hmm_pfns)) {
- r = -ENOMEM;
- goto out_free_ranges;
- }
-
mmap_read_lock(mm);
- vma = find_vma(mm, start);
- mmap_read_unlock(mm);
- if (unlikely(!vma || start < vma->vm_start)) {
+ vma = vma_lookup(mm, start);
+ if (unlikely(!vma)) {
r = -EFAULT;
- goto out_putmm;
+ goto out_unlock;
}
if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
vma->vm_file)) {
r = -EPERM;
- goto out_putmm;
+ goto out_unlock;
}
- mmap_read_unlock(mm);
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-
- retry:
- range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
-
- mmap_read_lock(mm);
- r = hmm_range_fault(range);
- mmap_read_unlock(mm);
- if (unlikely(r)) {
- /*
- * FIXME: This timeout should encompass the retry from
- * mmu_interval_read_retry() as well.
- */
- if (r == -EBUSY && !time_after(jiffies, timeout))
- goto retry;
- goto out_free_pfns;
- }
-
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; i < ttm->num_pages; i++)
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
-
- gtt->range = range;
- mmput(mm);
-
- return 0;
- false);
-out_putmm:
-out_free_pfns:
- kvfree(range->hmm_pfns);
-out_free_ranges:
- kfree(range);
-out:
+ readonly = amdgpu_ttm_tt_is_readonly(ttm);
+ r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
+ ttm->num_pages, &gtt->range, readonly,
+ true);
+out_unlock:
+ mmap_read_unlock(mm);
mmput(mm);
+
return r;
}
* FIXME: Must always hold notifier_lock for this, and must
* not ignore the return code.
*/
- r = mmu_interval_read_retry(gtt->range->notifier,
- gtt->range->notifier_seq);
- kvfree(gtt->range->hmm_pfns);
- kfree(gtt->range);
+ r = amdgpu_hmm_range_get_pages_done(gtt->range);
gtt->range = NULL;
}
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
goto gart_bind_fail;
r = amdgpu_gart_bind(adev,
gtt->offset + (page_idx << PAGE_SHIFT),
ttm->num_pages - page_idx,
- &ttm->pages[page_idx],
&(gtt->ttm.dma_address[page_idx]), flags);
} else {
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
}
gart_bind_fail:
DRM_ERROR("failed to pin userptr\n");
return r;
}
+ } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
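+ /* Imported dma-bufs get mapped at bind time: fetch the
+ * sg_table from the attachment and flatten it into the DMA
+ * address array used for the GART binding below.
+ */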
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
}
+
if (!ttm->num_pages) {
WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
/* bind pages into GART page tables */
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->mem.start = addr >> PAGE_SHIFT;
- } else {
+ bo->resource->start = addr >> PAGE_SHIFT;
+ return 0;
+ }
- /* allocate GART space */
- tmp = bo->mem;
- tmp.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->mem.placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- ttm_resource_free(bo, &bo->mem);
- bo->mem = tmp;
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
}
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
return r;
int r;
/* if the pages have userptr pinning then clear that first */
- if (gtt->userptr)
+ if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ } else if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ }
if (!gtt->bound)
return;
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
-
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
- if (!ttm->sg) {
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
-
- attach = gtt->gobj->import_attach;
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt))
- return PTR_ERR(sgt);
-
- ttm->sg = sgt;
- }
-
- drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
- ttm->num_pages);
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return 0;
- }
return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->sg = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
- return;
- }
-
- if (ttm->sg && gtt->gobj->import_attach) {
- struct dma_buf_attachment *attach;
-
- attach = gtt->gobj->import_attach;
- dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
- ttm->sg = NULL;
return;
}
return -ENOMEM;
}
+ /* Set TTM_PAGE_FLAG_SG before populate but after create. */
+ bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+
gtt = (void *)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT) {
+ if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_PREEMPT)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
+ unsigned long num_pages = bo->resource->num_pages;
struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_get_list(bo->base.resv);
+ flist = dma_resv_shared_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
}
}
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
+ case AMDGPU_PL_PREEMPT:
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ return false;
case TTM_PL_TT:
if (amdgpu_bo_is_amdgpu_bo(bo) &&
amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
&cursor);
while (cursor.remaining) {
if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
uint32_t value = 0;
int ret = 0;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
- amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
while (cursor.remaining) {
uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
uint64_t bytes = 4 - (cursor.start & 3);
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
- .verify_access = &amdgpu_verify_access,
.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
bool mem_train_support = false;
if (!amdgpu_sriov_vf(adev)) {
- ret = amdgpu_mem_train_support(adev);
- if (ret == 1)
+ if (amdgpu_atomfirmware_mem_training_supported(adev))
mem_train_support = true;
- else if (ret == -1)
- return -EINVAL;
else
DRM_DEBUG("memory training does not support!\n");
}
NULL);
if (r)
return r;
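+ /* Keep the firmware-reserved slice of stolen VRAM away from
+ * the allocator by backing it with a pinned kernel BO.
+ */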
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
+ adev->mman.stolen_reserved_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_reserved_memory,
+ NULL);
+ if (r)
+ return r;
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned)(gtt_size / (1024 * 1024)));
+ /* Initialize preemptible memory pool */
+ r = amdgpu_preempt_mgr_init(adev);
+ if (r) {
+ DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ return r;
+ }
+
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
+ if (adev->mman.stolen_reserved_size)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+ NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
- if (adev->mman.aper_base_kaddr)
- iounmap(adev->mman.aper_base_kaddr);
- adev->mman.aper_base_kaddr = NULL;
-
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
+ amdgpu_preempt_mgr_fini(adev);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
adev->mman.buffer_funcs_enabled = enable;
}
- static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
- {
- struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- ret = amdgpu_bo_fault_reserve_notify(bo);
- if (ret)
- goto unlock;
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
- unlock:
- dma_resv_unlock(bo->base.resv);
- return ret;
- }
-
- static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
- .fault = amdgpu_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
- };
-
- int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
- {
- struct drm_file *file_priv = filp->private_data;
- struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
- int r;
-
- r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
- if (unlikely(r != 0))
- return r;
-
- vma->vm_ops = &amdgpu_ttm_vm_ops;
- return 0;
- }
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
+ DRM_ERROR("Trying to clear preemptible memory.\n");
+ return -EINVAL;
+ }
+
+ if (bo->tbo.resource->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
- num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+ num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
num_loops = 0;
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
amdgpu_res_next(&cursor, cursor.size);
}
}
- amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
while (cursor.remaining) {
uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
uint64_t dst_addr = cursor.start;
- dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ dst_addr += amdgpu_ttm_domain_start(adev,
+ bo->tbo.resource->mem_type);
amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
cur_size);
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) <= 2) {
+ if (GRAPHICS_VER(i915) <= 2) {
tile->height = 16;
tile->width = 128;
tile->size = 11;
tile->size = 12;
}
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
return 8192 / tile->width;
- else if (INTEL_GEN(i915) < 7)
+ else if (GRAPHICS_VER(i915) < 7)
return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
else
return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
if (err)
goto out_unlock;
- if (pitch > 2 && INTEL_GEN(i915) >= 4) {
+ if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto out_unlock;
}
- if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
+ if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
}
}
- if (INTEL_GEN(i915) >= 4) {
+ if (GRAPHICS_VER(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
err = check_partial_mappings(obj, &tile, end);
tile.stride =
i915_prandom_u32_max_state(max_pitch, &prng);
tile.stride = (1 + tile.stride) * tile.width;
- if (INTEL_GEN(i915) < 4)
+ if (GRAPHICS_VER(i915) < 4)
tile.stride = rounddown_pow_of_two(tile.stride);
}
return true;
}
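+ /* With a single placement, point at the slot in i915's own
+ * region table rather than at the caller's (typically
+ * stack-allocated) array.
+ */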
+ static void object_set_placements(struct drm_i915_gem_object *obj,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+ {
+ GEM_BUG_ON(!n_placements);
+
+ if (n_placements == 1) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mr = placements[0];
+
+ obj->mm.placements = &i915->mm.regions[mr->id];
+ obj->mm.n_placements = 1;
+ } else {
+ obj->mm.placements = placements;
+ obj->mm.n_placements = n_placements;
+ }
+ }
+
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
- area = find_vma(current->mm, addr);
+ area = vma_lookup(current->mm, addr);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
if (IS_ERR(obj))
return PTR_ERR(obj);
+ object_set_placements(obj, &mr, 1);
+
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
if (err == 0)
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
- nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
+ u64 vram_size = drm->client.device.info.ram_size;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 4) {
+ nvbo->bo.base.size < vram_size / 4) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* at the same time.
*/
if (nvbo->zeta) {
- fpfn = vram_pages / 2;
+ fpfn = (vram_size / 2) >> PAGE_SHIFT;
lpfn = ~0;
} else {
fpfn = 0;
- lpfn = vram_pages / 2;
+ lpfn = (vram_size / 2) >> PAGE_SHIFT;
}
for (i = 0; i < nvbo->placement.num_placement; ++i) {
nvbo->placements[i].fpfn = fpfn;
if (nvbo->bo.pin_count) {
bool error = evict;
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
break;
case TTM_PL_TT:
error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
default:
break;
}
if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- bo->mem.mem_type, domain);
+ bo->resource->mem_type, domain);
ret = -EBUSY;
}
ttm_bo_pin(&nvbo->bo);
ttm_bo_pin(&nvbo->bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available -= bo->base.size;
break;
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->base.size;
break;
if (ret)
return ret;
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i, j;
- if (!ttm_dma)
+ if (!ttm_dma || !ttm_dma->dma_address)
return;
if (!ttm_dma->pages) {
NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
NOUVEAU_GEM_DOMAIN_CPU);
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_resource *reg)
{
- struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
struct nouveau_mem *new_mem = nouveau_mem(reg);
struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
- ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
}
}
- if (new_reg) {
- if (new_reg->mm_node)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
- else
- nvbo->offset = 0;
- }
+ if (new_reg)
+ nvbo->offset = (new_reg->start << PAGE_SHIFT);
}
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_resource *old_reg = &bo->mem;
+ struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
if (old_reg->mem_type == TTM_PL_TT &&
new_reg->mem_type == TTM_PL_SYSTEM) {
nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_reg);
goto out;
}
}
out_ntfy:
if (ret) {
- nouveau_bo_move_ntfy(bo, &bo->mem);
+ nouveau_bo_move_ntfy(bo, bo->resource);
}
return ret;
}
- static int
- nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
- {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
- filp->private_data);
- }
-
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
struct ttm_resource *reg)
list_del_init(&nvbo->io_reserve_lru);
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
- nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
goto retry;
}
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
- if (bo->mem.mem_type != TTM_PL_VRAM) {
+ if (bo->resource->mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nvbo->kind)
return 0;
- if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ if (bo->resource->mem_type != TTM_PL_SYSTEM)
return 0;
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
+ bo->resource->start + bo->resource->num_pages < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
.evict_flags = nouveau_bo_evict_flags,
.delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
- .verify_access = nouveau_bo_verify_access,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
default:
break;
}
+ break;
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
- drm_dp_aux_unregister(&nv_connector->aux);
kfree(nv_connector->aux.name);
}
kfree(connector);
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
- struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct device_node *cn, *dn = pci_device_to_OF_node(pdev);
if (!dn ||
!((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
int ret;
ret = nouveau_backlight_init(connector);
+ if (ret)
+ return ret;
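+
+ /* Register the AUX adapter only once the connector itself
+ * is registered; early_unregister mirrors this.
+ */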
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_aux_register(&nouveau_connector(connector)->aux);
+ if (ret)
+ goto backlight_fini;
+ }
+
+ return 0;
+ backlight_fini:
+ nouveau_backlight_fini(connector);
return ret;
}
static void
nouveau_connector_early_unregister(struct drm_connector *connector)
{
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+ drm_dp_aux_unregister(&nouveau_connector(connector)->aux);
+
nouveau_backlight_fini(connector);
}
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
+ nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
- ret = drm_dp_aux_register(&nv_connector->aux);
+ drm_dp_aux_init(&nv_connector->aux);
if (ret) {
- NV_ERROR(drm, "failed to register aux channel\n");
+ NV_ERROR(drm, "Failed to init AUX adapter for sor-%04x-%04x: %d\n",
+ dcbe->hasht, dcbe->hashm, ret);
kfree(nv_connector);
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs;
- break;
+ fallthrough;
default:
funcs = &nouveau_connector_funcs;
break;
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+ #include <drm/drm_scdc_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
#define VC5_HDMI_VERTB_VSPO_SHIFT 16
#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
+ #define VC5_HDMI_SCRAMBLER_CTL_ENABLE BIT(0)
+
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8
#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8)
# define VC4_HD_M_ENABLE BIT(0)
#define CEC_CLOCK_FREQ 40000
- #define VC4_HSM_MID_CLOCK 149985000
#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000)
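+ /* Anything past the HDMI 1.4 limit of a 340 MHz TMDS clock
+ * requires the HDMI 2.0 scrambler (and SCDC in the sink).
+ */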
+ static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode)
+ {
+ return (mode->clock * 1000) > HDMI_14_MAX_TMDS_CLK;
+ }
+
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
- if (vc4_hdmi->hpd_gpio) {
- if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
- vc4_hdmi->hpd_active_low)
- connected = true;
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
+ if (vc4_hdmi->hpd_gpio &&
+ gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
+ connected = true;
} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
connected = true;
} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
}
}
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
+ if (vc4_hdmi->disable_4kp60) {
+ struct drm_device *drm = connector->dev;
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (vc4_hdmi_mode_needs_scrambling(mode)) {
+ drm_warn_once(drm, "The core clock cannot reach frequencies high enough to support 4k @ 60Hz.");
+ drm_warn_once(drm, "Please change your config.txt file to add hdmi_enable_4kp60.");
+ }
+ }
+ }
+
return ret;
}
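+ /* Colorspace and HDR metadata are carried in infoframes
+ * written while enabling the encoder, so a change to either
+ * has to force a full modeset.
+ */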
+ static int vc4_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+ {
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+
+ if (!crtc)
+ return 0;
+
+ if (old_state->colorspace != new_state->colorspace ||
+ !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+ }
+
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
struct vc4_hdmi_connector_state *old_state =
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
.get_modes = vc4_hdmi_connector_get_modes,
+ .atomic_check = vc4_hdmi_connector_atomic_check,
};
static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_mode_create_hdmi_colorspace_property(connector);
+ if (ret)
+ return ret;
+
+ drm_connector_attach_colorspace_property(connector);
drm_connector_attach_tv_margin_properties(connector);
drm_connector_attach_max_bpc_property(connector, 8, 12);
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ if (vc4_hdmi->variant->supports_hdr)
+ drm_connector_attach_hdr_output_metadata_property(connector);
+
drm_connector_attach_encoder(connector, encoder);
return 0;
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
-
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, cstate);
drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
vc4_hdmi_write_infoframe(encoder, &frame);
}
+ static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder)
+ {
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ union hdmi_infoframe frame;
+
+ if (!vc4_hdmi->variant->supports_hdr)
+ return;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ if (drm_hdmi_infoframe_set_hdr_metadata(&frame.drm, conn_state))
+ return;
+
+ vc4_hdmi_write_infoframe(encoder, &frame);
+ }
+
static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
*/
if (vc4_hdmi->audio.streaming)
vc4_hdmi_set_audio_infoframe(encoder);
+
+ vc4_hdmi_set_hdr_infoframe(encoder);
+ }
+
+ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+ {
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+
+ if (!vc4_encoder->hdmi_monitor)
+ return false;
+
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ return true;
+ }
+
+ #define SCRAMBLING_POLLING_DELAY_MS 1000
+
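+ /* Enable order matters: tell the sink via SCDC first, then
+ * turn on the controller's scrambler, then start polling the
+ * sink's scrambling status.
+ */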
+ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
+ {
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ if (!vc4_hdmi_supports_scrambling(encoder, mode))
+ return;
+
+ if (!vc4_hdmi_mode_needs_scrambling(mode))
+ return;
+
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
+
+ HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) |
+ VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+
+ queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
+ msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
+ }
+
+ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
+ {
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /*
+ * At boot, encoder->crtc will be NULL. Since we don't know the
+ * state of the scrambler and in order to avoid any
+ * inconsistency, let's disable it all the time.
+ */
+ if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
+ return;
+
+ if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
+ return;
+
+ if (delayed_work_pending(&vc4_hdmi->scrambling_work))
+ cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+
+ HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
+ ~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
+
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
+ }
+
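+ /* Sinks can silently drop scrambling (e.g. after an HPD
+ * glitch); poll the SCDC status once a second and re-enable
+ * it when it goes away.
+ */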
+ static void vc4_hdmi_scrambling_wq(struct work_struct *work)
+ {
+ struct vc4_hdmi *vc4_hdmi = container_of(to_delayed_work(work),
+ struct vc4_hdmi,
+ scrambling_work);
+
+ if (drm_scdc_get_scrambling_status(vc4_hdmi->ddc))
+ return;
+
+ drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
+ drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
+
+ queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
+ msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS));
}
static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
+
+ vc4_hdmi_disable_scrambling(encoder);
}
static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long pixel_rate, hsm_rate;
+ unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
- ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
return;
return;
}
- ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->pixel_clock);
- return;
- }
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
- /*
- * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
- * at 300MHz.
- */
- ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock,
- (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
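+ /* Scale the BVB bus clock with the pixel rate: 300 MHz above
+ * 297 MHz (4k60), 150 MHz above 148.5 MHz, 75 MHz otherwise.
+ */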
+ if (pixel_rate > 297000000)
+ bvb_rate = 300000000;
+ else if (pixel_rate > 148500000)
+ bvb_rate = 150000000;
+ else
+ bvb_rate = 75000000;
+
+ ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
- clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
}
vc4_hdmi_recenter_fifo(vc4_hdmi);
+ vc4_hdmi_enable_scrambling(encoder);
}
static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
return -EINVAL;
+ if (vc4_hdmi->disable_4kp60 && (pixel_rate > HDMI_14_MAX_TMDS_CLK))
+ return -EINVAL;
+
vc4_state->pixel_rate = pixel_rate;
return 0;
if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
+ if (vc4_hdmi->disable_4kp60 && vc4_hdmi_mode_needs_scrambling(mode))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
return 0;
}
+#ifdef CONFIG_PM
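+/*
+ * Runtime PM now owns the HSM clock, which must be running for
+ * register access; users take a runtime PM reference instead of
+ * managing the clock themselves.
+ */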
+static int vc4_hdmi_runtime_suspend(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+
+ return 0;
+}
+
+static int vc4_hdmi_runtime_resume(struct device *dev)
+{
+ struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#endif
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
struct vc4_hdmi *vc4_hdmi;
struct drm_encoder *encoder;
struct device_node *ddc_node;
- u32 value;
int ret;
vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
+ INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
dev_set_drvdata(dev, vc4_hdmi);
encoder = &vc4_hdmi->encoder.base.base;
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
- if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
- enum of_gpio_flags hpd_gpio_flags;
-
- vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
- "hpd-gpios", 0,
- &hpd_gpio_flags);
- if (vc4_hdmi->hpd_gpio < 0) {
- ret = vc4_hdmi->hpd_gpio;
- goto err_unprepare_hsm;
- }
-
- vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+ vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(vc4_hdmi->hpd_gpio)) {
+ ret = PTR_ERR(vc4_hdmi->hpd_gpio);
+ goto err_put_ddc;
}
vc4_hdmi->disable_wifi_frequencies =
of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
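+
+ /* 4k60 needs the core clock to reach at least 550 MHz; if
+ * the firmware caps it below that (hdmi_enable_4kp60 unset),
+ * filter such modes out up front.
+ */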
+ if (variant->max_pixel_clock == 600000000) {
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ long max_rate = clk_round_rate(vc4->hvs->core_clk, 550000000);
+
+ if (max_rate < 550000000)
+ vc4_hdmi->disable_4kp60 = true;
+ }
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
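+ /* If the firmware handed over an enabled controller, its
+ * clocks are already running: take matching references so
+ * the clock framework does not switch off a live display.
+ */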
+ if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
+ clk_prepare_enable(vc4_hdmi->pixel_clock);
+ clk_prepare_enable(vc4_hdmi->hsm_clock);
+ clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ }
+
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
err_destroy_encoder:
drm_encoder_cleanup(encoder);
- err_unprepare_hsm:
pm_runtime_disable(dev);
+ err_put_ddc:
put_device(&vc4_hdmi->ddc->dev);
return ret;
.phy_rng_enable = vc4_hdmi_phy_rng_enable,
.phy_rng_disable = vc4_hdmi_phy_rng_disable,
.channel_map = vc4_hdmi_channel_map,
+ .supports_hdr = false,
};
static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
+static const struct dev_pm_ops vc4_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
+ vc4_hdmi_runtime_resume,
+ NULL)
+};
+
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
+ .pm = &vc4_hdmi_pm_ops,
},
};