// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
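
/* GEM and hardware state management for the Broadcom V3D 3.x+ GPU:
 * invariant hardware setup, cache maintenance, reset, and command-list
 * submission (BO lookup, reservation locking, fence attachment, and
 * handoff to the DRM GPU scheduler).
 */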

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}
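
/* Note: wait_for() used below is the driver's local polling helper;
 * its second argument is the timeout in milliseconds.
 */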

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	/* The GCA only exists on V3D 3.x parts; newer hardware has no
	 * GCA to shut down.
	 */
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_ERROR("Resetting GPU.\n");
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}
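
/* The cache maintenance helpers below cover the GPU's cache hierarchy:
 * the GCA-level L3 (pre-4.1 hardware only), the per-core read-only L2,
 * the texture L2 (L2T) fed by the L1 TMU write combiner, and the
 * per-slice read-only caches.
 */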

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2 cache. */
static void
v3d_invalidate_l2(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

static void
v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
	}
}

/* Flushes texture L2 cachelines, writing back dirty data (FLM_FLUSH). */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	v3d_invalidate_l1td(v3d, core);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T flush\n");
	}
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Invalidates texture L2 cachelines, dropping them without writeback
 * (FLM_CLEAR).
 */
static void
v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core,
		       V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T invalidate\n");
	}
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	v3d_flush_l3(v3d);

	v3d_invalidate_l2(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
	v3d_flush_l2t(v3d, 0);
}

void
v3d_flush_caches(struct v3d_dev *v3d)
{
	v3d_invalidate_l1td(v3d, 0);
	v3d_invalidate_l2t(v3d, 0);
}
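
/* The two entry points above differ in direction:
 * v3d_invalidate_caches() discards potentially stale clean data before
 * a job reads from memory, while v3d_flush_caches() writes dirty TMU
 * data back so it is visible to subsequent consumers.
 */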

static void
v3d_attach_object_fences(struct v3d_exec_info *exec)
{
	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
	struct v3d_bo *bo;
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		/* XXX: Use shared fences for read-only objects. */
		reservation_object_add_excl_fence(bo->resv, out_fence);
	}
}
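
/* Attaching the render job's "finished" fence as the exclusive fence on
 * each BO's reservation object is what makes implicit synchronization
 * work: v3d_wait_bo_ioctl() and any other device sharing the dma-buf
 * will wait on it before touching the BO.
 */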

static void
v3d_unlock_bo_reservations(struct drm_device *dev,
			   struct v3d_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct drm_device *dev,
			 struct v3d_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct v3d_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_v3d_bo(&exec->bo[i]->base);
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_v3d_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_v3d_bo(&exec->bo[contended_lock]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);
		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}
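
/* The retry loop above implements the standard wait/wound protocol for
 * reservation locks: on -EDEADLK we drop everything we hold, take the
 * contended lock with ww_mutex_lock_slow_interruptible(), and only then
 * retry the rest, which guarantees forward progress.
 */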

/**
 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_v3d_submit_cl *args,
		  struct v3d_exec_info *exec)
{
	u32 *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;
	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check this. */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}
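
/* Note that in v3d_cl_lookup_bos() the success path falls through the
 * "fail" label as well: the handles array is only a temporary copy of
 * the user's handle list, so it is freed on both paths.
 */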

static void
v3d_exec_cleanup(struct kref *ref)
{
	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
						  refcount);
	struct v3d_dev *v3d = exec->v3d;
	unsigned int i;
	struct v3d_bo *bo, *save;

	dma_fence_put(exec->bin.in_fence);
	dma_fence_put(exec->render.in_fence);

	dma_fence_put(exec->bin.done_fence);
	dma_fence_put(exec->render.done_fence);

	dma_fence_put(exec->bin_done_fence);

	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);
	kvfree(exec->bo);

	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(exec);
}

void v3d_exec_put(struct v3d_exec_info *exec)
{
	kref_put(&exec->refcount, v3d_exec_cleanup);
}
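
/* exec lifetime: the submit ioctl holds one reference, and each job
 * pushed to the scheduler takes another (see the kref_get() calls in
 * v3d_submit_cl_ioctl()); the final v3d_exec_put() triggers
 * v3d_exec_cleanup() above.
 */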

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_v3d_bo(gem_obj);

	ret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
						  timeout_jiffies);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	drm_gem_object_put_unlocked(gem_obj);
	return ret;
}
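
/* On -EAGAIN the expectation is that userspace retries the ioctl with
 * the decremented args->timeout_ns, so a restarted wait only covers the
 * remaining time rather than the full original timeout.
 */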

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	int ret = 0;

	if (args->pad != 0) {
		DRM_INFO("pad must be zero: %d\n", args->pad);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	kref_init(&exec->refcount);

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
				     0, &exec->bin.in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
				     0, &exec->render.in_fence);
	if (ret == -EINVAL)
		goto fail;

	exec->qma = args->qma;
	exec->qms = args->qms;
	exec->qts = args->qts;
	exec->bin.exec = exec;
	exec->bin.start = args->bcl_start;
	exec->bin.end = args->bcl_end;
	exec->render.exec = exec;
	exec->render.start = args->rcl_start;
	exec->render.end = args->rcl_end;
	exec->v3d = v3d;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	if (exec->bin.start != exec->bin.end) {
		ret = drm_sched_job_init(&exec->bin.base,
					 &v3d_priv->sched_entity[V3D_BIN],
					 v3d_priv);
		if (ret)
			goto fail_unreserve;

		exec->bin_done_fence =
			dma_fence_get(&exec->bin.base.s_fence->finished);

		kref_get(&exec->refcount); /* put by scheduler job completion */
		drm_sched_entity_push_job(&exec->bin.base,
					  &v3d_priv->sched_entity[V3D_BIN]);
	}

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_object_fences(exec);

	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);

	/* Update the return sync object for the job. */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, 0,
					  &exec->render.base.s_fence->finished);
		drm_syncobj_put(sync_out);
	}

	v3d_exec_put(exec);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
	v3d_exec_put(exec);
	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;
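
	/* pt_size: 4MB of u32 entries, one per 4KB page, enough to map
	 * V3D's entire 4GB virtual address space.
	 */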

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->dev, pt_size, &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}