// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret = 0;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                if (ret) {
                        DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
                        return ret;
                }
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                if (ret) {
                        DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);
        return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
        if (gpu->core_clk && gpu->fast_rate)
                clk_set_rate(gpu->core_clk, gpu->fast_rate);

        /* Set the RBBM timer rate to 19.2 MHz */
        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 19200000);

        return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
        clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

        /*
         * Set the clock to a deliberately low rate. On older targets the clock
         * speed had to be non-zero to avoid problems. On newer targets this
         * will be rounded down to zero anyway so it all works out.
         */
        if (gpu->core_clk)
                clk_set_rate(gpu->core_clk, 27000000);

        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 0);

        return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
        return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
        clk_disable_unprepare(gpu->ebi1_clk);
        return 0;
}

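/*
 * Bring the GPU out of suspend: power rails first, then core clocks, then
 * the AXI interface clock, and finally resume devfreq.  The hardware is
 * flagged as needing re-init before the next submit.
 */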
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);
        trace_msm_gpu_resume(0);

        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;

        ret = enable_clk(gpu);
        if (ret)
                return ret;

        ret = enable_axi(gpu);
        if (ret)
                return ret;

        msm_devfreq_resume(gpu);

        gpu->needs_hw_init = true;

        return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);
        trace_msm_gpu_suspend(0);

        msm_devfreq_suspend(gpu);

        ret = disable_axi(gpu);
        if (ret)
                return ret;

        ret = disable_clk(gpu);
        if (ret)
                return ret;

        ret = disable_pwrrail(gpu);
        if (ret)
                return ret;

        gpu->suspend_count++;

        return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
        int ret;

        WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

        if (!gpu->needs_hw_init)
                return 0;

        disable_irq(gpu->irq);
        ret = gpu->funcs->hw_init(gpu);
        if (!ret)
                gpu->needs_hw_init = false;
        enable_irq(gpu->irq);

        return ret;
}

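/*
 * Walk the ring's submit list and signal the hw fence of every submit
 * whose sequence number has been completed by the GPU, ie. is at or
 * below the given fence value.
 */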
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                uint32_t fence)
{
        struct msm_gem_submit *submit;
        unsigned long flags;

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_for_each_entry(submit, &ring->submits, node) {
                if (submit->seqno > fence)
                        break;

                msm_update_fence(submit->ring->fctx,
                        submit->hw_fence->seqno);
                dma_fence_signal(submit->hw_fence);
        }
        spin_unlock_irqrestore(&ring->submit_lock, flags);
}

#ifdef CONFIG_DEV_COREDUMP
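/*
 * Read callback for the devcoredump blob: pretty-prints the most recently
 * captured crash state (kernel version, offending task info, and the
 * GPU-specific state via the show() callback) into the caller's window
 * of the dump.
 */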
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
                size_t count, void *data, size_t datalen)
{
        struct msm_gpu *gpu = data;
        struct drm_print_iterator iter;
        struct drm_printer p;
        struct msm_gpu_state *state;

        state = msm_gpu_crashstate_get(gpu);
        if (!state)
                return 0;

        iter.data = buffer;
        iter.offset = 0;
        iter.start = offset;
        iter.remain = count;

        p = drm_coredump_printer(&iter);

        drm_printf(&p, "---\n");
        drm_printf(&p, "kernel: " UTS_RELEASE "\n");
        drm_printf(&p, "module: " KBUILD_MODNAME "\n");
        drm_printf(&p, "time: %lld.%09ld\n",
                state->time.tv_sec, state->time.tv_nsec);
        if (state->comm)
                drm_printf(&p, "comm: %s\n", state->comm);
        if (state->cmd)
                drm_printf(&p, "cmdline: %s\n", state->cmd);

        gpu->funcs->show(gpu, state, &p);

        msm_gpu_crashstate_put(gpu);

        return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
        struct msm_gpu *gpu = data;

        msm_gpu_crashstate_put(gpu);
}

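/*
 * Snapshot a single buffer object into the crash state.  The iova and size
 * are always recorded; the contents are only copied for readable,
 * non-imported buffers.
 */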
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
                struct msm_gem_object *obj, u64 iova, u32 flags)
{
        struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

        /* Don't record write-only objects */
        state_bo->size = obj->base.size;
        state_bo->iova = iova;

        /* Only store data for non-imported buffer objects marked for read */
        if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
                void *ptr;

                state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
                if (!state_bo->data)
                        goto out;

                msm_gem_lock(&obj->base);
                ptr = msm_gem_get_vaddr_active(&obj->base);
                msm_gem_unlock(&obj->base);
                if (IS_ERR(ptr)) {
                        kvfree(state_bo->data);
                        state_bo->data = NULL;
                        goto out;
                }

                memcpy(state_bo->data, ptr, obj->base.size);
                msm_gem_put_vaddr(&obj->base);
        }
out:
        state->nr_bos++;
}

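/*
 * Capture the GPU-specific state plus the submit's buffer objects, and
 * hand the result to the devcoredump framework.  Only one crash state is
 * kept at a time; it is dropped again when userspace reads or discards
 * the dump.
 */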
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit, char *comm, char *cmd)
{
        struct msm_gpu_state *state;

        /* Check if the target supports capturing crash state */
        if (!gpu->funcs->gpu_state_get)
                return;

        /* Only save one crash state at a time */
        if (gpu->crashstate)
                return;

        state = gpu->funcs->gpu_state_get(gpu);
        if (IS_ERR_OR_NULL(state))
                return;

        /* Fill in the additional crash state information */
        state->comm = kstrdup(comm, GFP_KERNEL);
        state->cmd = kstrdup(cmd, GFP_KERNEL);
        state->fault_info = gpu->fault_info;

        if (submit) {
                int i, nr = 0;

                /* count # of buffers to dump: */
                for (i = 0; i < submit->nr_bos; i++)
                        if (should_dump(submit, i))
                                nr++;
                /* always dump cmd bo's, but don't double count them: */
                for (i = 0; i < submit->nr_cmds; i++)
                        if (!should_dump(submit, submit->cmd[i].idx))
                                nr++;

                state->bos = kcalloc(nr,
                        sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

                for (i = 0; i < submit->nr_bos; i++) {
                        if (should_dump(submit, i)) {
                                msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
                                        submit->bos[i].iova, submit->bos[i].flags);
                        }
                }

                for (i = 0; state->bos && i < submit->nr_cmds; i++) {
                        int idx = submit->cmd[i].idx;

                        if (!should_dump(submit, submit->cmd[i].idx)) {
                                msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
                                        submit->bos[idx].iova, submit->bos[idx].flags);
                        }
                }
        }

        /* Set the active crash state to be dumped on failure */
        gpu->crashstate = state;

        /* FIXME: Release the crashstate if this errors out? */
        dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
                msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

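/*
 * Find the in-flight submit with the given sequence number on this ring,
 * if any.
 */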
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
        struct msm_gem_submit *submit;
        unsigned long flags;

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_for_each_entry(submit, &ring->submits, node) {
                if (submit->seqno == fence) {
                        spin_unlock_irqrestore(&ring->submit_lock, flags);
                        return submit;
                }
        }
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

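/*
 * Recover a hung GPU: capture a crash dump for the offending submit,
 * advance the fences past it, reset the hardware, and replay the
 * remaining submits from each ring.
 */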
static void recover_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        char *comm = NULL, *cmd = NULL;
        int i;

        mutex_lock(&dev->struct_mutex);

        DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

        submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
        if (submit) {
                struct task_struct *task;

                /* Increment the fault counts */
                gpu->global_faults++;
                submit->queue->faults++;

                task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
                        comm = kstrdup(task->comm, GFP_KERNEL);
                        cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
                        put_task_struct(task);
                }

                if (comm && cmd) {
                        DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
                                gpu->name, comm, cmd);

                        msm_rd_dump_submit(priv->hangrd, submit,
                                "offending task: %s (%s)", comm, cmd);
                } else {
                        msm_rd_dump_submit(priv->hangrd, submit, NULL);
                }
        }

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);

        kfree(cmd);
        kfree(comm);

        /*
         * Update all the rings with the latest and greatest fence.. this
         * needs to happen after msm_rd_dump_submit() to ensure that the
         * bo's referenced by the offending submit are still around.
         */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                uint32_t fence = ring->memptrs->fence;

                /*
                 * For the current (faulting?) ring/submit advance the fence by
                 * one more to clear the faulting submit
                 */
                if (ring == cur_ring)
                        fence++;

                update_fences(gpu, ring, fence);
        }

        if (msm_gpu_active(gpu)) {
                /* retire completed submits, plus the one that hung: */
                retire_submits(gpu);

                pm_runtime_get_sync(&gpu->pdev->dev);
                gpu->funcs->recover(gpu);
                pm_runtime_put_sync(&gpu->pdev->dev);

                /*
                 * Replay all remaining submits starting with highest priority
                 * ring
                 */
                for (i = 0; i < gpu->nr_rings; i++) {
                        struct msm_ringbuffer *ring = gpu->rb[i];
                        unsigned long flags;

                        spin_lock_irqsave(&ring->submit_lock, flags);
                        list_for_each_entry(submit, &ring->submits, node)
                                gpu->funcs->submit(gpu, submit);
                        spin_unlock_irqrestore(&ring->submit_lock, flags);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        msm_gpu_retire(gpu);
}

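/*
 * Handle an iova fault reported by the MMU: capture a crash dump for the
 * first fault on the offending submit, then resume translation so the GPU
 * can make progress again.
 */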
static void fault_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
        struct drm_device *dev = gpu->dev;
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        char *comm = NULL, *cmd = NULL;

        mutex_lock(&dev->struct_mutex);

        submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
        if (submit && submit->fault_dumped)
                goto resume_smmu;

        if (submit) {
                struct task_struct *task;

                task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
                        comm = kstrdup(task->comm, GFP_KERNEL);
                        cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
                        put_task_struct(task);
                }

                /*
                 * When we get GPU iova faults, we can get 1000s of them,
                 * but we really only want to log the first one.
                 */
                submit->fault_dumped = true;
        }

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);

        kfree(cmd);
        kfree(comm);

resume_smmu:
        memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
        gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

        mutex_unlock(&dev->struct_mutex);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;

        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

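/*
 * Periodic timer that checks whether the active ring has made progress
 * since the last tick; if not, and work is still outstanding, the GPU is
 * considered hung and recovery is kicked off.
 */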
static void hangcheck_handler(struct timer_list *t)
{
        struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
        struct drm_device *dev = gpu->dev;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
        uint32_t fence = ring->memptrs->fence;

        if (fence != ring->hangcheck_fence) {
                /* some progress has been made.. ya! */
                ring->hangcheck_fence = fence;
        } else if (fence < ring->seqno) {
                /* no progress and not done.. hung! */
                ring->hangcheck_fence = fence;
                DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
                                gpu->name, ring->id);
                DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
                DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
                                gpu->name, ring->seqno);

                kthread_queue_work(gpu->worker, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (ring->seqno > ring->hangcheck_fence)
                hangcheck_timer_reset(gpu);

        /* workaround for missing irq: */
        msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* update cntrs: */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}

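/*
 * Accumulate total/active time since the last sample; this is the
 * software-counter side of the perfcntr sampling.
 */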
static void update_sw_cntrs(struct msm_gpu *gpu)
{
        ktime_t time;
        uint32_t elapsed;
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        pm_runtime_get_sync(&gpu->pdev->dev);

        spin_lock_irqsave(&gpu->perf_lock, flags);
        /* we could dynamically enable/disable perfcntr registers too.. */
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
        pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}

/*
 * Cmdstream submission/retirement:
 */

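/*
 * Retire a completed submit: record elapsed time and estimated clock
 * frequency from the memptr stats, drop the runtime-pm reference taken at
 * submit, and release the ring's reference to the submit.
 */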
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                struct msm_gem_submit *submit)
{
        int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        volatile struct msm_gpu_submit_stats *stats;
        u64 elapsed, clock = 0;
        unsigned long flags;

        stats = &ring->memptrs->stats[index];
        /* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
        elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
        do_div(elapsed, 192);

        /* Calculate the clock frequency from the number of CP cycles */
        if (elapsed) {
                clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
                do_div(clock, elapsed);
        }

        trace_msm_gpu_submit_retired(submit, elapsed, clock,
                stats->alwayson_start, stats->alwayson_end);

        msm_submit_retire(submit);

        pm_runtime_mark_last_busy(&gpu->pdev->dev);
        pm_runtime_put_autosuspend(&gpu->pdev->dev);

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_del(&submit->node);
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        /* Update devfreq on transition from active->idle: */
        mutex_lock(&gpu->active_lock);
        gpu->active_submits--;
        WARN_ON(gpu->active_submits < 0);
        if (!gpu->active_submits)
                msm_devfreq_idle(gpu);
        mutex_unlock(&gpu->active_lock);

        msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
        int i;

        /* Retire the commits starting with highest priority */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];

                while (true) {
                        struct msm_gem_submit *submit = NULL;
                        unsigned long flags;

                        spin_lock_irqsave(&ring->submit_lock, flags);
                        submit = list_first_entry_or_null(&ring->submits,
                                        struct msm_gem_submit, node);
                        spin_unlock_irqrestore(&ring->submit_lock, flags);

                        /*
                         * If no submit, we are done.  If submit->fence hasn't
                         * been signalled, then later submits are not signalled
                         * either, so we are also done.
                         */
                        if (submit && dma_fence_is_signaled(submit->hw_fence)) {
                                retire_submit(gpu, ring, submit);
                        } else {
                                break;
                        }
                }
        }
}

static void retire_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

        retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        int i;

        for (i = 0; i < gpu->nr_rings; i++)
                update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

        kthread_queue_work(gpu->worker, &gpu->retire_work);
        update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
        unsigned long flags;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        pm_runtime_get_sync(&gpu->pdev->dev);

        msm_gpu_hw_init(gpu);

        submit->seqno = ++ring->seqno;

        msm_rd_dump_submit(priv->rd, submit, NULL);

        update_sw_cntrs(gpu);

        /*
         * ring->submits holds a ref to the submit, to deal with the case
         * that a submit completes before msm_ioctl_gem_submit() returns.
         */
        msm_gem_submit_get(submit);

        spin_lock_irqsave(&ring->submit_lock, flags);
        list_add_tail(&submit->node, &ring->submits);
        spin_unlock_irqrestore(&ring->submit_lock, flags);

        /* Update devfreq on transition from idle->active: */
        mutex_lock(&gpu->active_lock);
        if (!gpu->active_submits)
                msm_devfreq_active(gpu);
        gpu->active_submits++;
        mutex_unlock(&gpu->active_lock);

        gpu->funcs->submit(gpu, submit);
        priv->lastctx = submit->queue->ctx;

        hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;

        return gpu->funcs->irq(gpu);
}

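/*
 * Acquire the full clock bulk for the device and cache the "core" and
 * "rbbmtimer" clocks, which are rate-controlled individually.
 */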
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
        int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

        if (ret < 1) {
                gpu->nr_clocks = 0;
                return ret;
        }

        gpu->nr_clocks = ret;

        gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "core");

        gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "rbbmtimer");

        return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
        struct msm_gem_address_space *aspace = NULL;

        if (!gpu)
                return NULL;

        /*
         * If the target doesn't support private address spaces then return
         * the global one
         */
        if (gpu->funcs->create_private_address_space) {
                aspace = gpu->funcs->create_private_address_space(gpu);
                if (!IS_ERR(aspace))
                        aspace->pid = get_pid(task_pid(task));
        }

        if (IS_ERR_OR_NULL(aspace))
                aspace = msm_gem_address_space_get(gpu->aspace);

        return aspace;
}

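/*
 * One-time GPU setup: create the worker thread, map registers, wire up the
 * IRQ, acquire clocks and regulators, create the address space, and
 * allocate the shared memptrs and ringbuffer(s).
 */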
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
{
        int i, ret, nr_rings = config->nr_rings;
        void *memptrs;
        uint64_t memptrs_iova;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;

        gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
        if (IS_ERR(gpu->worker)) {
                ret = PTR_ERR(gpu->worker);
                gpu->worker = NULL;
                goto fail;
        }

        sched_set_fifo_low(gpu->worker->task);

        INIT_LIST_HEAD(&gpu->active_list);
        mutex_init(&gpu->active_lock);
        kthread_init_work(&gpu->retire_work, retire_worker);
        kthread_init_work(&gpu->recover_work, recover_worker);
        kthread_init_work(&gpu->fault_work, fault_worker);

        timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

        spin_lock_init(&gpu->perf_lock);

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, config->ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq(pdev, 0);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        ret = get_clocks(pdev, gpu);
        if (ret)
                goto fail;

        gpu->ebi1_clk = msm_clk_get(pdev, "bus");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        gpu->pdev = pdev;
        platform_set_drvdata(pdev, &gpu->adreno_smmu);

        msm_devfreq_init(gpu);

        gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

        if (gpu->aspace == NULL)
                DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        else if (IS_ERR(gpu->aspace)) {
                ret = PTR_ERR(gpu->aspace);
                goto fail;
        }

        memptrs = msm_gem_kernel_new(drm,
                sizeof(struct msm_rbmemptrs) * nr_rings,
                check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
                &memptrs_iova);

        if (IS_ERR(memptrs)) {
                ret = PTR_ERR(memptrs);
                DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
                goto fail;
        }

        msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

        if (nr_rings > ARRAY_SIZE(gpu->rb)) {
                DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
                        ARRAY_SIZE(gpu->rb));
                nr_rings = ARRAY_SIZE(gpu->rb);
        }

        /* Create ringbuffer(s): */
        for (i = 0; i < nr_rings; i++) {
                gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

                if (IS_ERR(gpu->rb[i])) {
                        ret = PTR_ERR(gpu->rb[i]);
                        DRM_DEV_ERROR(drm->dev,
                                "could not create ringbuffer %d: %d\n", i, ret);
                        goto fail;
                }

                memptrs += sizeof(struct msm_rbmemptrs);
                memptrs_iova += sizeof(struct msm_rbmemptrs);
        }

        gpu->nr_rings = nr_rings;

        return 0;

fail:
        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

        platform_set_drvdata(pdev, NULL);
        return ret;
}

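/*
 * Tear down everything created by msm_gpu_init(), in roughly the reverse
 * order.
 */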
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        int i;

        DBG("%s", gpu->name);

        WARN_ON(!list_empty(&gpu->active_list));

        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

        if (!IS_ERR_OR_NULL(gpu->aspace)) {
                gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
                msm_gem_address_space_put(gpu->aspace);
        }

        if (gpu->worker)
                kthread_destroy_worker(gpu->worker);

        msm_devfreq_cleanup(gpu);
}