/* drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c */
1 /*
2  * Copyright 2017 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Andres Rodriguez
23  */
24
25 #include "amdgpu.h"
26 #include "amdgpu_ring.h"
27
28 static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
29                                     int hw_ip)
30 {
31         if (!mapper)
32                 return -EINVAL;
33
34         if (hw_ip > AMDGPU_MAX_IP_NUM)
35                 return -EINVAL;
36
37         mapper->hw_ip = hw_ip;
38         mutex_init(&mapper->lock);
39
40         memset(mapper->queue_map, 0, sizeof(mapper->queue_map));
41
42         return 0;
43 }
44
45 static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
46                                           int ring)
47 {
48         return mapper->queue_map[ring];
49 }
50
51 static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
52                              int ring, struct amdgpu_ring *pring)
53 {
54         if (WARN_ON(mapper->queue_map[ring])) {
55                 DRM_ERROR("Un-expected ring re-map\n");
56                 return -EINVAL;
57         }
58
59         mapper->queue_map[ring] = pring;
60
61         return 0;
62 }
63
64 static int amdgpu_identity_map(struct amdgpu_device *adev,
65                                struct amdgpu_queue_mapper *mapper,
66                                u32 ring,
67                                struct amdgpu_ring **out_ring)
68 {
69         u32 instance;
70
71         switch (mapper->hw_ip) {
72         case AMDGPU_HW_IP_GFX:
73                 *out_ring = &adev->gfx.gfx_ring[ring];
74                 break;
75         case AMDGPU_HW_IP_COMPUTE:
76                 *out_ring = &adev->gfx.compute_ring[ring];
77                 break;
78         case AMDGPU_HW_IP_DMA:
79                 *out_ring = &adev->sdma.instance[ring].ring;
80                 break;
81         case AMDGPU_HW_IP_UVD:
82                 instance = ring;
83                 *out_ring = &adev->uvd.inst[instance].ring;
84                 break;
85         case AMDGPU_HW_IP_VCE:
86                 *out_ring = &adev->vce.ring[ring];
87                 break;
88         case AMDGPU_HW_IP_UVD_ENC:
89                 instance = ring / adev->uvd.num_enc_rings;
90                 *out_ring =
91                 &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
92                 break;
93         case AMDGPU_HW_IP_VCN_DEC:
94                 *out_ring = &adev->vcn.ring_dec;
95                 break;
96         case AMDGPU_HW_IP_VCN_ENC:
97                 *out_ring = &adev->vcn.ring_enc[ring];
98                 break;
99         default:
100                 *out_ring = NULL;
101                 DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
102                 return -EINVAL;
103         }
104
105         return amdgpu_update_cached_map(mapper, ring, *out_ring);
106 }
107
108 static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
109 {
110         switch (hw_ip) {
111         case AMDGPU_HW_IP_GFX:
112                 return AMDGPU_RING_TYPE_GFX;
113         case AMDGPU_HW_IP_COMPUTE:
114                 return AMDGPU_RING_TYPE_COMPUTE;
115         case AMDGPU_HW_IP_DMA:
116                 return AMDGPU_RING_TYPE_SDMA;
117         case AMDGPU_HW_IP_UVD:
118                 return AMDGPU_RING_TYPE_UVD;
119         case AMDGPU_HW_IP_VCE:
120                 return AMDGPU_RING_TYPE_VCE;
121         default:
122                 DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
123                 return -1;
124         }
125 }
126
127 static int amdgpu_lru_map(struct amdgpu_device *adev,
128                           struct amdgpu_queue_mapper *mapper,
129                           u32 user_ring, bool lru_pipe_order,
130                           struct amdgpu_ring **out_ring)
131 {
132         int r, i, j;
133         int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
134         int ring_blacklist[AMDGPU_MAX_RINGS];
135         struct amdgpu_ring *ring;
136
137         /* 0 is a valid ring index, so initialize to -1 */
138         memset(ring_blacklist, 0xff, sizeof(ring_blacklist));
139
140         for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
141                 ring = mapper->queue_map[i];
142                 if (ring)
143                         ring_blacklist[j++] = ring->idx;
144         }
145
146         r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
147                                 j, lru_pipe_order, out_ring);
148         if (r)
149                 return r;
150
151         return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
152 }
153
154 /**
155  * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
156  *
157  * @adev: amdgpu_device pointer
158  * @mgr: amdgpu_queue_mgr structure holding queue information
159  *
160  * Initialize the the selected @mgr (all asics).
161  *
162  * Returns 0 on success, error on failure.
163  */
164 int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
165                           struct amdgpu_queue_mgr *mgr)
166 {
167         int i, r;
168
169         if (!adev || !mgr)
170                 return -EINVAL;
171
172         memset(mgr, 0, sizeof(*mgr));
173
174         for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
175                 r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
176                 if (r)
177                         return r;
178         }
179
180         return 0;
181 }
182
183 /**
184  * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
185  *
186  * @adev: amdgpu_device pointer
187  * @mgr: amdgpu_queue_mgr structure holding queue information
188  *
189  * De-initialize the the selected @mgr (all asics).
190  *
191  * Returns 0 on success, error on failure.
192  */
193 int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
194                           struct amdgpu_queue_mgr *mgr)
195 {
196         return 0;
197 }
198
199 /**
200  * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
201  *
202  * @adev: amdgpu_device pointer
203  * @mgr: amdgpu_queue_mgr structure holding queue information
204  * @hw_ip: HW IP enum
205  * @instance: HW instance
206  * @ring: user ring id
207  * @our_ring: pointer to mapped amdgpu_ring
208  *
209  * Map a userspace ring id to an appropriate kernel ring. Different
210  * policies are configurable at a HW IP level.
211  *
212  * Returns 0 on success, error on failure.
213  */
214 int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
215                          struct amdgpu_queue_mgr *mgr,
216                          u32 hw_ip, u32 instance, u32 ring,
217                          struct amdgpu_ring **out_ring)
218 {
219         int r, ip_num_rings;
220         struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
221
222         if (!adev || !mgr || !out_ring)
223                 return -EINVAL;
224
225         if (hw_ip >= AMDGPU_MAX_IP_NUM)
226                 return -EINVAL;
227
228         if (ring >= AMDGPU_MAX_RINGS)
229                 return -EINVAL;
230
231         /* Right now all IPs have only one instance - multiple rings. */
232         if (instance != 0) {
233                 DRM_DEBUG("invalid ip instance: %d\n", instance);
234                 return -EINVAL;
235         }
236
237         switch (hw_ip) {
238         case AMDGPU_HW_IP_GFX:
239                 ip_num_rings = adev->gfx.num_gfx_rings;
240                 break;
241         case AMDGPU_HW_IP_COMPUTE:
242                 ip_num_rings = adev->gfx.num_compute_rings;
243                 break;
244         case AMDGPU_HW_IP_DMA:
245                 ip_num_rings = adev->sdma.num_instances;
246                 break;
247         case AMDGPU_HW_IP_UVD:
248                 ip_num_rings = adev->uvd.num_uvd_inst;
249                 break;
250         case AMDGPU_HW_IP_VCE:
251                 ip_num_rings = adev->vce.num_rings;
252                 break;
253         case AMDGPU_HW_IP_UVD_ENC:
254                 ip_num_rings =
255                         adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
256                 break;
257         case AMDGPU_HW_IP_VCN_DEC:
258                 ip_num_rings = 1;
259                 break;
260         case AMDGPU_HW_IP_VCN_ENC:
261                 ip_num_rings = adev->vcn.num_enc_rings;
262                 break;
263         default:
264                 DRM_DEBUG("unknown ip type: %d\n", hw_ip);
265                 return -EINVAL;
266         }
267
268         if (ring >= ip_num_rings) {
269                 DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
270                           ring, ip_num_rings, hw_ip);
271                 return -EINVAL;
272         }
273
274         mutex_lock(&mapper->lock);
275
276         *out_ring = amdgpu_get_cached_map(mapper, ring);
277         if (*out_ring) {
278                 /* cache hit */
279                 r = 0;
280                 goto out_unlock;
281         }
282
283         switch (mapper->hw_ip) {
284         case AMDGPU_HW_IP_GFX:
285         case AMDGPU_HW_IP_UVD:
286         case AMDGPU_HW_IP_VCE:
287         case AMDGPU_HW_IP_UVD_ENC:
288         case AMDGPU_HW_IP_VCN_DEC:
289         case AMDGPU_HW_IP_VCN_ENC:
290                 r = amdgpu_identity_map(adev, mapper, ring, out_ring);
291                 break;
292         case AMDGPU_HW_IP_DMA:
293                 r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
294                 break;
295         case AMDGPU_HW_IP_COMPUTE:
296                 r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
297                 break;
298         default:
299                 *out_ring = NULL;
300                 r = -EINVAL;
301                 DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip);
302         }
303
304 out_unlock:
305         mutex_unlock(&mapper->lock);
306         return r;
307 }