1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2013 Red Hat
7 #ifndef __MSM_RINGBUFFER_H__
8 #define __MSM_RINGBUFFER_H__
10 #include "drm/gpu_scheduler.h"
/*
 * rbmemptr(ring, member): GPU (iova) address of @member within the
 * ring's shared msm_rbmemptrs block — base iova plus the member offset.
 */
13 #define rbmemptr(ring, member) \
14 ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
/*
 * rbmemptr_stats(ring, index, member): iova of @member inside the
 * @index'th msm_gpu_submit_stats element of the memptrs stats[] array.
 */
16 #define rbmemptr_stats(ring, index, member) \
17 (rbmemptr((ring), stats) + \
18 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
19 offsetof(struct msm_gpu_submit_stats, member))
/*
 * Per-submit profiling sample written back by the GPU (addressed through
 * rbmemptr_stats()).
 * NOTE(review): the field list is elided from this view — do not assume
 * a particular layout; confirm against the full file.
 */
21 struct msm_gpu_submit_stats {

/* Number of slots in msm_rbmemptrs::stats[] (ring of recent samples) */
28 #define MSM_GPU_SUBMIT_STATS_COUNT 64
/*
 * Shared memory block exposed to the GPU via memptrs_iova (see
 * rbmemptr()).  Fields are volatile because they are apparently updated
 * asynchronously by the GPU, not the CPU — verify against the users.
 * NOTE(review): closing brace elided from this view.
 */
30 struct msm_rbmemptrs {
/* ring read pointer (rptr) reported back to the driver */
31 volatile uint32_t rptr;
/* last completed fence seqno written back */
32 volatile uint32_t fence;
33 /* Introduced on A7xx */
/* fence for the BV (binning) pipe; only meaningful on A7xx+ per above */
34 volatile uint32_t bv_fence;
/* per-submit profiling samples, indexed via rbmemptr_stats() */
36 volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
/*
 * NOTE(review): the enclosing struct declaration is elided from this
 * view.  These look like CP (command processor) snapshot fields — IB1/IB2
 * indirect-buffer base addresses and remaining ("rem") counts — used by
 * the progress check referenced below; confirm against the full file.
 */
41 uint64_t ib1_base, ib2_base;
42 uint32_t ib1_rem, ib2_rem;
/*
 * One ringbuffer plus its driver-side state: backing buffer, scheduler,
 * in-flight submit tracking, shared memptrs, and hangcheck bookkeeping.
 * NOTE(review): several fields/lines are elided from this view.
 */
45 struct msm_ringbuffer {
/*
 * Backing GEM object and CPU pointers into it: [start, end) bounds the
 * ring; per the OUT_RING() comment below, next is the staged write
 * position which only becomes cur at flush time.
 */
48 struct drm_gem_object *bo;
49 uint32_t *start, *end, *cur, *next;
52 * The job scheduler for this ring.
54 struct drm_gpu_scheduler sched;
57 * List of in-flight submits on this ring. Protected by submit_lock.
59 * Currently just submits that are already written into the ring, not
60 * submits that are still in drm_gpu_scheduler's queues. At a later
61 * step we could probably move to letting drm_gpu_scheduler manage
62 * hangcheck detection and keep track of submit jobs that are in-
65 struct list_head submits;
66 spinlock_t submit_lock;
/* fence value observed by the hangcheck logic — presumably the last
 * completed fence at the previous check; verify against msm_gpu.c */
69 uint32_t hangcheck_fence;
/* CPU mapping of this ring's shared msm_rbmemptrs, and its GPU address
 * (the base used by the rbmemptr() macros above) */
70 struct msm_rbmemptrs *memptrs;
71 uint64_t memptrs_iova;
/* fence context associated with this ring */
72 struct msm_fence_context *fctx;
75 * hangcheck_progress_retries:
77 * The number of extra hangcheck duration cycles that we have given
78 * due to it appearing that the GPU is making forward progress.
80 * For GPU generations which support progress detection (see
81 * msm_gpu_funcs::progress()), if the GPU appears to be making progress
82 * (ie. the CP has advanced in the command stream), we'll allow up to
83 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
84 * before killing the job. But to detect progress we need two sample
85 * points, so the duration of the hangcheck timer is halved. In other
86 * words we'll let the submit run for up to:
88 * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
90 int hangcheck_progress_retries;
93 * last_cp_state: The state of the CP at the last call to gpu->progress()
95 struct msm_cp_state last_cp_state;
98 * preempt_lock protects preemption and serializes wptr updates against
99 * preemption. Can be acquired from irq context.
101 spinlock_t preempt_lock;
/*
 * Create ring @id for @gpu.  @memptrs/@memptrs_iova are the CPU pointer
 * and GPU address of the ring's msm_rbmemptrs block (stored in the
 * struct fields of the same names above).  Returns the new ring;
 * NOTE(review): error-return convention not visible here — verify
 * whether it's NULL or ERR_PTR() at the definition.
 */
104 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
105 void *memptrs, uint64_t memptrs_iova);
/* Tear down a ring created by msm_ringbuffer_new() */
106 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
108 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
/*
 * Emit one dword into the ring at the staged write position, wrapping
 * from end back to start when the boundary is reached.  The write goes
 * through ring->next; it is not visible at ring->cur until the flush
 * (see the comment in the body).
 * NOTE(review): the function's qualifiers and braces are elided from
 * this view (likely a static inline void — confirm in the full file).
 */
111 OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
114 * ring->next points to the current command being written - it won't be
115 * committed as ring->cur until the flush
117 if (ring->next == ring->end)
118 ring->next = ring->start;
119 *(ring->next++) = data;
122 #endif /* __MSM_RINGBUFFER_H__ */