// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
/*
 * Power Management:
 */

static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, opp);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}
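/*
 * A rough sketch of the devfreq round trip: the governor proposes a
 * frequency, .target() rounds it to a supported OPP, and the new rate is
 * applied either through the target-specific gpu_set_freq() hook (where
 * something other than the CPU, e.g. a power management core, owns the
 * clock) or by setting the core clock directly.
 */
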
static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}
static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}
static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};
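/*
 * How the profile callbacks fit together, roughly: on each polling interval
 * the governor calls .get_dev_status() and derives a load estimate from
 * busy_time relative to total_time (e.g. 7500us busy out of 10000us total
 * reads as 75% load), then picks a frequency and hands it to .target()
 * above.
 */
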
static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need target support to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the table
	 * from the OPP table.
	 * After a deferred probe, these may have been left at non-zero values,
	 * so set them back to zero before creating the devfreq device.
	 */
	msm_devfreq_profile.freq_table = NULL;
	msm_devfreq_profile.max_state = 0;

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
		return;
	}

	devfreq_suspend_device(gpu->devfreq.devfreq);

	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
			gpu->devfreq.devfreq);
	if (IS_ERR(gpu->cooling)) {
		DRM_DEV_ERROR(&gpu->pdev->dev,
				"Couldn't register GPU cooling device\n");
		gpu->cooling = NULL;
	}
}
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
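/*
 * 19.2MHz matches the always-on tick rate assumed by the submit statistics:
 * one tick at 19.2MHz is 1/19.2e6 s, i.e. about 52.08ns, which is what the
 * tick-to-nanosecond conversion in retire_submit() below encodes as
 * "* 10000 / 192".
 */
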
static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}
static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}
void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}
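/*
 * Power-up ordering matters here: rails come up before clocks, and clocks
 * before the AXI interface clock, with msm_gpu_pm_suspend() undoing the
 * steps in reverse.  needs_hw_init is set so the next submission path
 * reinitializes the freshly powered hardware.
 */
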
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}
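/*
 * The IRQ is masked across hw_init() so a half-initialized GPU cannot raise
 * interrupts against stale driver state; needs_hw_init is only cleared once
 * the target's hw_init hook reports success.
 */
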
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	spin_lock(&ring->submit_lock);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
	spin_unlock(&ring->submit_lock);
}
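/*
 * The early break relies on ring->submits being kept in submission order:
 * seqnos increase monotonically along the list, so the first submit with
 * seqno > fence means everything after it is still pending too.
 */
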
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}
static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Don't record write only objects */
	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store data for non imported buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(&obj->base);
		ptr = msm_gem_get_vaddr_active(&obj->base);
		msm_gem_unlock(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}
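/*
 * The object is vmap'd under the object lock and its contents copied into a
 * kvmalloc'd snapshot, so the coredump keeps a stable view of the buffer
 * even if userspace reuses it after the crash.
 */
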
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	state->fault_info = gpu->fault_info;

	if (submit) {
		int i, nr = 0;

		/* count # of buffers to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			if (should_dump(submit, i))
				nr++;
		/* always dump cmd bo's, but don't double count them: */
		for (i = 0; i < submit->nr_cmds; i++)
			if (!should_dump(submit, submit->cmd[i].idx))
				nr++;

		state->bos = kcalloc(nr,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; i < submit->nr_bos; i++) {
			if (should_dump(submit, i)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
					submit->bos[i].iova, submit->bos[i].flags);
			}
		}

		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			if (!should_dump(submit, submit->cmd[i].idx)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
					submit->bos[idx].iova, submit->bos[idx].flags);
			}
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif
/*
 * Hangcheck detection for locked gpu:
 */
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	spin_lock(&ring->submit_lock);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock(&ring->submit_lock);
			return submit;
		}
	}
	spin_unlock(&ring->submit_lock);

	return NULL;
}
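/*
 * find_submit() returns the submit without taking an extra reference; the
 * ring->submits list itself holds a reference, so the pointer stays valid
 * as long as the submit has not been retired off the list.
 */
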
static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		/* Increment the fault counts */
		gpu->global_faults++;
		submit->queue->faults++;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		/* msm_rd_dump_submit() needs bo locked to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_lock(&submit->bos[i].obj->base);

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}

		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_unlock(&submit->bos[i].obj->base);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			spin_lock(&ring->submit_lock);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock(&ring->submit_lock);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}
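/*
 * Recovery in brief: dump the offending submit, snapshot crash state, fast
 * forward fences past anything that completed (plus the hung submit on the
 * active ring), reset the hardware via the target's recover hook, and then
 * replay every submit still queued so innocent work is not lost.
 */
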
static void fault_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&dev->struct_mutex);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		struct task_struct *task;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

	mutex_unlock(&dev->struct_mutex);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	kthread_queue_work(gpu->worker, &gpu->retire_work);
}
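/*
 * Example of the check above: if the last submitted seqno is 42 and the
 * completed fence is stuck at 40 across two consecutive timer expirations,
 * fence == hangcheck_fence and fence < seqno on the second pass, so the
 * ring is declared hung and recover_work is queued.
 */
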
/*
 * Performance Counters:
 */
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
/*
 * Cmdstream submission/retirement:
 */
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0;
	int i;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

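	/*
	 * Worked example of the conversion: one 19.2MHz tick is
	 * 1e9 / 19.2e6 = 10000 / 192 ~= 52.08ns, so e.g. 1,920,000 ticks
	 * become 1,920,000 * 10000 / 192 = 100,000,000ns = 100ms.
	 */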
	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
		do_div(clock, elapsed);
	}

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		msm_gem_active_put(obj);
		msm_gem_unpin_iova_locked(obj, submit->aspace);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);

	spin_lock(&ring->submit_lock);
	list_del(&submit->node);
	spin_unlock(&ring->submit_lock);

	msm_gem_submit_put(submit);
}
static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;

			spin_lock(&ring->submit_lock);
			submit = list_first_entry_or_null(&ring->submits,
					struct msm_gem_submit, node);
			spin_unlock(&ring->submit_lock);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}
}
static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	retire_submits(gpu);
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		struct drm_gem_object *drm_obj = &msm_obj->base;
		uint64_t iova;

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);

		msm_gem_active_get(drm_obj, gpu);
	}

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock(&ring->submit_lock);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock(&ring->submit_lock);

	gpu->funcs->submit(gpu, submit);
	priv->lastctx = submit->queue->ctx;

	hangcheck_timer_reset(gpu);
}
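/*
 * Submission bookkeeping at a glance: each bo gets an object reference and
 * a pinned iova for the lifetime of the submit, the submit's fence is
 * attached to each bo's reservation object (exclusive for writes, shared
 * for reads), and retire_submit() above drops all of it once the fence
 * signals.
 */
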
/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}
/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	INIT_LIST_HEAD(&gpu->active_list);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	devfreq_cooling_unregister(gpu->cooling);
}