S: Supported
W: http://ez.analog.com/community/linux-device-drivers
-F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml
F: drivers/iio/adc/ad7768-1.c
ANALOG DEVICES INC AD7780 DRIVER
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
-F: Documentation/devicetree/bindings/usb/cdns-usb3.txt
+F: Documentation/devicetree/bindings/usb/cdns,usb3.yaml
F: drivers/usb/cdns3/
CADET FM/AM RADIO RECEIVER DRIVER
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+DRM DRIVER FOR NOVATEK NT36672A PANELS
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
DRM DRIVERS FOR VC4
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-S: Maintained
+S: Orphan
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/fb/
S: Maintained
W: http://www.hisilicon.com
-F: Documentation/devicetree/bindings/arm/hisilicon/hisilicon-low-pin-count.txt
+F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml
F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
F: drivers/input/touchscreen/melfas_mip4.c
MELLANOX BLUEFIELD I2C DRIVER
-M: Khalil Blaiech <kblaiech@mellanox.com>
+M: Khalil Blaiech <kblaiech@nvidia.com>
S: Supported
F: drivers/i2c/busses/i2c-mlxbf.c
F: drivers/mailbox/qcom-ipcc.c
F: include/dt-bindings/mailbox/qcom-ipcc.h
+QUALCOMM IPQ4019 VQMMC REGULATOR DRIVER
+S: Maintained
+F: Documentation/devicetree/bindings/regulator/vqmmc-ipq4019-regulator.yaml
+F: drivers/regulator/vqmmc-ipq4019-regulator.c
+
QUALCOMM RMNET DRIVER
-F: Documentation/devicetree/bindings/net/renesas,*.txt
F: Documentation/devicetree/bindings/net/renesas,*.yaml
F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h
S: Maintained
-F: Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
+F: Documentation/devicetree/bindings/phy/hisilicon,hi3660-usb3.yaml
F: drivers/phy/hisilicon/phy-hi3660-usb3.c
USB ISP116X DRIVER
NV_NAVI12_P_A0 = 10,
NV_NAVI14_M_A0 = 20,
NV_SIENNA_CICHLID_P_A0 = 40,
+ NV_DIMGREY_CAVEFISH_P_A0 = 60,
NV_UNKNOWN = 0xFF
};
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
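+/* Each ASICREV_IS_* check below is a half-open revision range: >= its first revision, < the next block's start. */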
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
+#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0) && (eChipRev < NV_DIMGREY_CAVEFISH_P_A0))
+#define ASICREV_IS_DIMGREY_CAVEFISH_P(eChipRev) ((eChipRev >= NV_DIMGREY_CAVEFISH_P_A0) && (eChipRev < NV_UNKNOWN))
+#define GREEN_SARDINE_A0 0xA1
+#ifndef ASICREV_IS_GREEN_SARDINE
+#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
+#endif
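+/* Van Gogh is a separate ASIC family with its own revision numbering. */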
+#define FAMILY_VGH 144
+#define DEVICE_ID_VGH_163F 0x163F
+#define VANGOGH_A0 0x01
+#define VANGOGH_UNKNOWN 0xFF
+
+#ifndef ASICREV_IS_VANGOGH
+#define ASICREV_IS_VANGOGH(eChipRev) ((eChipRev >= VANGOGH_A0) && (eChipRev < VANGOGH_UNKNOWN))
#endif
/*
* ASIC chip ID
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
break;
- case FORCEWAKE_BLITTER_GEN9_REG:
- ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
+ case FORCEWAKE_GT_GEN9_REG:
+ ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG;
break;
case FORCEWAKE_MEDIA_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
const struct intel_engine_cs *engine =
intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
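+	/* Guests may legitimately write zero to clear the HWSP address; only validate non-zero values. */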
+ if (value != 0 &&
+ !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
return 0;
}
+/**
+ * FixMe:
+ * If the guest fills a non-privileged batch buffer through a WC mapping, as
+ * Mesa i965 does on ApolloLake/Broxton since 717e7539124d ("i965: Use a WC
+ * map and memcpy for the batch instead of pwrite."), the buffer filled by
+ * the VM vCPU is never flushed, and the host GPU hangs while executing these
+ * MI_BATCH_BUFFER commands.
+ * Work around this temporarily by setting the SNOOP bit for PAT3, which the
+ * PPGTT PML4 PTE selects with PAT(0) PCD(1) PWT(1).
+ * Performance is still expected to be low and will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+			      void *p_data, unsigned int bytes)
+{
+ u64 pat =
+ GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
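+	/* Ignore the guest-written value and expose a fixed PAT with snooping enabled. */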
+ vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+ return 0;
+}
+
static int guc_status_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
- MMIO_D(WM0_PIPEA_ILK, D_ALL);
- MMIO_D(WM0_PIPEB_ILK, D_ALL);
- MMIO_D(WM0_PIPEC_IVB, D_ALL);
+ MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
+ MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
+ MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
MMIO_D(WM1_LP_ILK, D_ALL);
MMIO_D(WM2_LP_ILK, D_ALL);
MMIO_D(WM3_LP_ILK, D_ALL);
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
- MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+ MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL);
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
NULL, NULL);
MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
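+	/* Not for BXT: the BXT-specific table registers this MMIO with F_CMD_ACCESS instead. */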
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
return 0;
}
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
+
return 0;
}
if (!pfdev->comp)
return -ENODEV;
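+	/* Firmware (DT/ACPI) marks the device DMA-coherent; device_get_dma_attr() reports it. */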
+ pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
+
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
err_out1:
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
err_out0:
drm_dev_put(ddev);
return err;
panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
- panfrost_device_fini(pfdev);
- pm_runtime_put_sync_suspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
drm_dev_put(ddev);
return 0;
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
- mutex_lock(&bo->mappings.lock);
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
- mutex_unlock(&bo->mappings.lock);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
+ struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
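+	/* Coherent GPUs can use cacheable CPU mappings instead of write-combined ones. */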
+ obj->base.map_cached = pfdev->coherent;
return &obj->base.base;
}
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
+static vm_fault_t vc4_fault(struct vm_fault *vmf);
+
static const char * const bo_type_names[] = {
"kernel",
"V3D",
return bo;
}
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
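+/* Per-object callbacks; these replace the driver-wide gem_* hooks removed from vc4_drm_driver below. */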
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = vc4_prime_vmap,
+ .vunmap = drm_gem_cma_prime_vunmap,
+ .vm_ops = &vc4_vm_ops,
+};
+
/**
* vc4_gem_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
+ bo->base.base.funcs = &vc4_gem_object_funcs;
+
return &bo->base.base;
}
}
if (IS_ERR(cma_obj)) {
- struct drm_printer p = drm_info_printer(vc4->dev->dev);
+ struct drm_printer p = drm_info_printer(vc4->base.dev);
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev);
return dmabuf;
}
-vm_fault_t vc4_fault(struct vm_fault *vmf)
+static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
return 0;
}
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
- return 0;
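+	/* Register a managed action so the cache is torn down automatically on device release. */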
+ return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}
-void vc4_bo_cache_destroy(struct drm_device *dev)
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
kfree(vc4file);
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object_unlocked = vc4_free_object,
- .gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = vc4_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_vmap = vc4_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = vc4_prime_mmap,
.dumb_create = vc4_dumb_create,
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
- if (!vc4)
- return -ENOMEM;
-
/* If VC4 V3D is missing, don't advertise render nodes. */
node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
if (!node || !of_device_is_available(node))
vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node);
- drm = drm_dev_alloc(&vc4_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
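+	/* Managed allocation: the drm_device is embedded in vc4_dev and freed automatically. */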
+ vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ if (IS_ERR(vc4))
+ return PTR_ERR(vc4);
+
+ drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- vc4->dev = drm;
- drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list);
mutex_init(&vc4->bin_bo_lock);
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_put;
+ return ret;
- drm_mode_config_init(drm);
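+	/* The drmm_ variant cleans up the mode config state on device release. */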
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
- vc4_gem_init(drm);
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(dev, drm);
if (ret)
- goto gem_destroy;
+ return ret;
ret = vc4_plane_create_additional_planes(drm);
if (ret)
unbind_all:
component_unbind_all(dev, drm);
- gem_destroy:
- vc4_gem_destroy(drm);
- drm_mode_config_cleanup(drm);
- vc4_bo_cache_destroy(drm);
- dev_put:
- drm_dev_put(drm);
+
return ret;
}
static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- drm_mode_config_cleanup(drm);
-
- drm_atomic_private_obj_fini(&vc4->load_tracker);
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
-
- drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
};
struct vc4_dev {
- struct drm_device *dev;
+ struct drm_device base;
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
- return (struct vc4_dev *)dev->dev_private;
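+	/* base is embedded in struct vc4_dev, so container_of() replaces dev_private. */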
+ return container_of(dev, struct vc4_dev, base);
}
struct vc4_bo {
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
-void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
-void vc4_gem_init(struct drm_device *dev);
-void vc4_gem_destroy(struct drm_device *dev);
+int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
}
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
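+	/* Callers now pass the full atomic state; look up this CRTC's old state from it. */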
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))