/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__
#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;
struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};
/* With the hardware I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);

	/**
	 * @ucode_load: Optional hook to upload fw to GEM objs
	 */
	int (*ucode_load)(struct msm_gpu *gpu);

	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	/* note: gpu_busy() can assume that we have been pm_resumed */
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	/* note: gpu_set_freq() can assume that we have been pm_resumed */
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
			     bool suspended);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);

	/**
	 * progress: Has the GPU made progress?
	 *
	 * Return true if the GPU's position in the cmdstream has advanced (or
	 * changed) since the last call. To avoid false negatives, this should
	 * account for cmdstream that is buffered in the FIFO upstream of the
	 * CP fw.
	 */
	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};
/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/** lock: lock for "suspended", "busy_cycles", and "time" */
	struct mutex lock;

	/**
	 * idle_freq:
	 *
	 * Shadow frequency used while the GPU is idle. From the PoV of
	 * the devfreq governor, we are continuing to sample busyness and
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while
	 * it is idle.
	 */
	unsigned long idle_freq;

	/**
	 * boost_freq:
	 *
	 * A PM QoS constraint to boost min freq for a period of time
	 * until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles: Last busy counter value, for calculating elapsed busy
	 * cycles since last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq constraint after the boost period
	 * has expired.
	 */
	struct msm_hrtimer_work boost_work;

	/** suspended: tracks if we're suspended */
	bool suspended;
};
struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;	/* sw counters */
	uint32_t last_cntrs[5];		/* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables). Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. the submit/retire
	 * path).
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock.
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;
	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;
	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;
	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired: */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	if (!adreno_smmu)
		return NULL;

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		 AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
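
/*
 * For reference, the CNTL fields above encode log2 sizes in 8-byte units:
 * with a 32K ring, BUFSZ = ilog2(32768 / 8) = 12, and with 32-byte blocks,
 * BLKSZ = ilog2(32 / 8) = 2.
 */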
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}
/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter, but the msm_gpu base class
 * will handle sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t select_val;
	uint32_t sample_reg;
	const char *name;
};
/*
 * The number of priority levels provided by drm gpu scheduler. The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
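
/*
 * With the scheduler's MIN/NORMAL/HIGH levels, this works out to three
 * priority levels per ring (KERNEL being the one we skip).
 */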
/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock: synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid: counter incremented each time a submitqueue is created,
 *           used to assign &msm_gpu_submitqueue.id
 * @aspace: the per-process GPU address-space
 * @ref: reference count
 * @seqno: unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace. This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches. Setting a value of 2 will in addition
	 * suppress suspend. (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/**
	 * comm: Overridden task comm, see MSM_PARAM_COMM
	 *
	 * Accessed under msm_gpu::lock
	 */
	char *comm;

	/**
	 * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
	 *
	 * Accessed under msm_gpu::lock
	 */
	char *cmdline;

	/**
	 * elapsed:
	 *
	 * The total (cumulative) elapsed time GPU was busy with rendering
	 * from this context in ns.
	 */
	uint64_t elapsed_ns;

	/**
	 * cycles:
	 *
	 * The total (cumulative) GPU cycles elapsed attributed to this
	 * context.
	 */
	uint64_t cycles;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file. Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};
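
/*
 * For illustration, a queue's entity would be looked up from this table by
 * combining its ring and scheduler priority (a sketch of the indexing, not
 * a verbatim helper):
 *
 *   entity = ctx->entities[sched_prio + ring_nr * NR_SCHED_PRIORITIES];
 */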
/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu: the gpu instance
 * @prio: the userspace priority level
 * @ring_nr: [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance. However, our
 * UABI is that lower numerical values are higher priority. So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care. The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
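
/*
 * Worked example, assuming three scheduler priority levels
 * (NR_SCHED_PRIORITIES == 3): userspace prio 0 maps to ring 0 at the
 * highest sched priority, prio 2 to ring 0 at the lowest, and prio 3
 * wraps around to ring 1 at the highest sched priority.
 */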
/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id: userspace id for the submitqueue, unique within the drm_file
 * @flags: userspace flags for the submitqueue, specified at creation
 *         (currently unused)
 * @ring_nr: the ringbuffer used by this submitqueue, which is determined
 *           by the submitqueue's priority
 * @faults: the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *              checking)
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 *       which set of pgtables the jobs submitted to the queue use)
 * @node: node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @idr_lock: for serializing access to fence_idr
 * @lock: submitqueue lock for serializing submits on a queue
 * @ref: reference count
 * @entity: the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	spinlock_t idr_lock;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};
struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
	char name[32];
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};
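
/*
 * Register accessors: "reg" is a dword (32-bit word) index, so it is
 * shifted left by 2 to form the byte offset into the MMIO region.
 */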
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned. We could do a series of validity checks if we
	 * wanted to, but really, is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that.
	 */
	val = (u64) msm_readl(gpu->mmio + (reg << 2));
	val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);

	return val;
}
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 struct drm_printer *p);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}
void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);
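
/*
 * Typical flow (a sketch, based on how the devfreq state above is
 * described): the driver calls msm_devfreq_active() when active_submits
 * goes from zero to nonzero, and msm_devfreq_idle() when the last submit
 * retires, which is when the clamp to idle_freq kicks in.
 */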
int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}
static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}
/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
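
/*
 * For example, a kernel-internal buffer allocation might route its flags
 * through check_apriv() so the BO is mapped privileged only where the hw
 * supports it (a sketch, not a specific call site):
 *
 *   msm_gem_kernel_new(gpu->dev, size,
 *                      check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
 *                      gpu->aspace, &bo, &iova);
 */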
#endif /* __MSM_GPU_H__ */