// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

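/*
 * Module parameters, fed into drm_sched_init() below: job_hang_limit is
 * the number of timeouts a job may cause before it is marked guilty,
 * hw_job_limit caps the number of jobs queued to the hardware at once.
 */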
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

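/*
 * run_job callback: invoked by the scheduler once all dependencies of the
 * job have signaled. Hands the job to the hardware and returns the
 * hardware fence, or NULL when the job has been marked to be skipped.
 */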
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

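/*
 * timedout_job callback: a job exceeded its timeout. Stop the scheduler,
 * filter out spurious timeouts (the fence already signaled, or the
 * front-end is still making forward progress), otherwise dump state and
 * reset the GPU before restarting the scheduler.
 */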
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16)) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}
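	/*
	 * No forward progress: the job is considered hung. Bump its karma so
	 * the scheduler can mark the offending context guilty.
	 */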
	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

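/*
 * free_job callback: the scheduler is done with this job (its hardware
 * fence has signaled), so drop the reference held on the submit.
 */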
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

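/*
 * Queue a prepared submit to the scheduler entity. On success the
 * scheduler holds its own reference to the submit and out_fence_id can be
 * handed to userspace to wait for completion.
 */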
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
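	/* expose the finished fence to userspace under a cyclic 32 bit id */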
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

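/*
 * Create the scheduler for one GPU core, with a 500 ms job timeout and
 * the module parameters above as hardware queue depth and hang limit.
 */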
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     DRM_SCHED_PRIORITY_COUNT,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}