/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

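/*
 * Look up the state-transition handler registered for @xcp_ip in its IP
 * function table based on @xcp_state, and call it with the IP's instance
 * mask. IPs that are invalid or register no handler are silently skipped.
 */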
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
                            struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
        int (*run_func)(void *handle, uint32_t inst_mask);
        int ret = 0;

        if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
                return 0;

        run_func = NULL;

        switch (xcp_state) {
        case AMDGPU_XCP_PREPARE_SUSPEND:
                run_func = xcp_ip->ip_funcs->prepare_suspend;
                break;
        case AMDGPU_XCP_SUSPEND:
                run_func = xcp_ip->ip_funcs->suspend;
                break;
        case AMDGPU_XCP_PREPARE_RESUME:
                run_func = xcp_ip->ip_funcs->prepare_resume;
                break;
        case AMDGPU_XCP_RESUME:
                run_func = xcp_ip->ip_funcs->resume;
                break;
        }

        if (run_func)
                ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

        return ret;
}

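/*
 * Walk every IP block of partition @xcp_id and run the handler for the
 * requested @state transition, stopping at the first failure.
 */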
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                     int state)
{
        struct amdgpu_xcp_ip *xcp_ip;
        struct amdgpu_xcp *xcp;
        int i, ret;

        if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
                return -EINVAL;

        xcp = &xcp_mgr->xcp[xcp_id];
        for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
                xcp_ip = &xcp->ip[i];
                ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
                if (ret)
                        break;
        }

        return ret;
}

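/*
 * Per-partition suspend/resume entry points: each drives all IP blocks of
 * partition @xcp_id through the corresponding state transition.
 */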
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
                                         AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
                                         AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
        return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

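/*
 * Record @ip in partition @xcp_id's IP table and mark both the IP entry and
 * the partition itself as valid.
 */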
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
                                   struct amdgpu_xcp_ip *ip)
{
        struct amdgpu_xcp *xcp;

        if (!ip)
                return;

        xcp = &xcp_mgr->xcp[xcp_id];
        xcp->ip[ip->ip_id] = *ip;
        xcp->ip[ip->ip_id].valid = true;

        xcp->valid = true;
}

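/**
 * amdgpu_xcp_init - (Re)build the partition table for a partition mode
 * @xcp_mgr: XCP manager
 * @num_xcps: number of partitions exposed by @mode
 * @mode: partition mode being configured
 *
 * Queries per-IP details for every partition, assigns memory partition ids
 * where the backend provides them, and refreshes the per-partition scheduler
 * lists.
 *
 * Return: 0 on success, -EINVAL if @num_xcps is zero or exceeds MAX_XCP.
 */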
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
        struct amdgpu_device *adev = xcp_mgr->adev;
        struct amdgpu_xcp_ip ip;
        uint8_t mem_id;
        int i, j, ret;

        if (!num_xcps || num_xcps > MAX_XCP)
                return -EINVAL;

        xcp_mgr->mode = mode;

        for (i = 0; i < MAX_XCP; ++i)
                xcp_mgr->xcp[i].valid = false;

        for (i = 0; i < num_xcps; ++i) {
                for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
                        ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j, &ip);
                        if (ret)
                                continue;

                        __amdgpu_xcp_add_block(xcp_mgr, i, &ip);
                }

                xcp_mgr->xcp[i].id = i;

                if (xcp_mgr->funcs->get_xcp_mem_id) {
                        ret = xcp_mgr->funcs->get_xcp_mem_id(
                                xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
                        if (!ret)
                                xcp_mgr->xcp[i].mem_id = mem_id;
                }
        }

        xcp_mgr->num_xcps = num_xcps;
        amdgpu_xcp_update_partition_sched_list(adev);

        xcp_mgr->num_xcp_per_mem_partition =
                num_xcps / adev->gmc.num_mem_partitions;
        return 0;
}

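/**
 * amdgpu_xcp_switch_partition_mode - Switch the device to a new partition mode
 * @xcp_mgr: XCP manager
 * @mode: target partition mode
 *
 * The manager is placed in a transient mode while the backend performs the
 * switch; on failure, the previous (or freshly queried) mode is restored.
 *
 * Return: 0 on success, or when no switch is needed or no backend handler
 * exists; negative errno on failure.
 */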
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
        int ret, curr_mode, num_xcps = 0;

        if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
                return -EINVAL;

        if (xcp_mgr->mode == mode)
                return 0;

        if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
                return 0;

        mutex_lock(&xcp_mgr->xcp_lock);

        curr_mode = xcp_mgr->mode;
        /* Set a transient mode for the duration of the switch */
        xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

        ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

        if (ret) {
                /* Failed; fall back to whatever mode the device is in now */
                if (xcp_mgr->funcs->query_partition_mode)
                        xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
                                xcp_mgr, AMDGPU_XCP_FL_LOCKED);
                else
                        xcp_mgr->mode = curr_mode;
        }

        mutex_unlock(&xcp_mgr->xcp_lock);

        return ret;
}

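/**
 * amdgpu_xcp_query_partition_mode - Query the current partition mode
 * @xcp_mgr: XCP manager
 * @flags: pass AMDGPU_XCP_FL_LOCKED if the caller already holds xcp_lock
 *
 * Reads the mode from the backend and warns if it disagrees with the cached
 * mode, unless a mode switch is in flight.
 *
 * Return: the partition mode reported by the device, or the cached mode when
 * no backend query is available.
 */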
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
        int mode;

        if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return xcp_mgr->mode;

        if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
                return xcp_mgr->mode;

        if (!(flags & AMDGPU_XCP_FL_LOCKED))
                mutex_lock(&xcp_mgr->xcp_lock);
        mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
        if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
                dev_WARN(xcp_mgr->adev->dev,
                         "Cached partition mode %d does not match device mode %d",
                         xcp_mgr->mode, mode);

        if (!(flags & AMDGPU_XCP_FL_LOCKED))
                mutex_unlock(&xcp_mgr->xcp_lock);

        return mode;
}

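/*
 * Allocate one DRM device per possible partition and point its render and
 * primary nodes back at the parent device, so every IOCTL is handled by the
 * primary device. The original pointers are saved so that
 * amdgpu_xcp_dev_unplug() can restore them.
 */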
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
        struct drm_device *p_ddev;
        struct drm_device *ddev;
        int i, ret;

        ddev = adev_to_drm(adev);

        for (i = 0; i < MAX_XCP; i++) {
                ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
                if (ret)
                        return ret;

                /* Redirect all IOCTLs to the primary device */
                adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
                adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
                adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
                adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
                p_ddev->render->dev = ddev;
                p_ddev->primary->dev = ddev;
                p_ddev->vma_offset_manager = ddev->vma_offset_manager;
                p_ddev->driver = &amdgpu_partition_driver;
                adev->xcp_mgr->xcp[i].ddev = p_ddev;
        }

        return 0;
}

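/**
 * amdgpu_xcp_mgr_init - Allocate and initialize the XCP manager for a device
 * @adev: amdgpu device
 * @init_mode: initial partition mode
 * @init_num_xcps: number of partitions for @init_mode
 * @xcp_funcs: backend callbacks; switch_partition_mode and get_ip_details
 *             are mandatory
 *
 * Return: 0 on success, negative errno on failure.
 */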
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
                        int init_num_xcps,
                        struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
        struct amdgpu_xcp_mgr *xcp_mgr;

        if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
            !xcp_funcs->get_ip_details)
                return -EINVAL;

        xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
        if (!xcp_mgr)
                return -ENOMEM;

        xcp_mgr->adev = adev;
        xcp_mgr->funcs = xcp_funcs;
        xcp_mgr->mode = init_mode;
        mutex_init(&xcp_mgr->xcp_lock);

        if (init_mode != AMDGPU_XCP_MODE_NONE)
                amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

        adev->xcp_mgr = xcp_mgr;

        return amdgpu_xcp_dev_alloc(adev);
}

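/**
 * amdgpu_xcp_get_partition - Find the partitions that own an IP instance
 * @xcp_mgr: XCP manager
 * @ip: IP block type
 * @instance: instance number of the IP block
 *
 * Return: a bitmask of the partition ids whose instance mask for @ip
 * contains @instance, -EINVAL for an invalid @ip, or -ENXIO when no
 * partition matches.
 */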
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
                             enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
        struct amdgpu_xcp *xcp;
        int i, id_mask = 0;

        if (ip >= AMDGPU_XCP_MAX_BLOCKS)
                return -EINVAL;

        for (i = 0; i < xcp_mgr->num_xcps; ++i) {
                xcp = &xcp_mgr->xcp[i];
                if (xcp->valid && xcp->ip[ip].valid &&
                    (xcp->ip[ip].inst_mask & BIT(instance)))
                        id_mask |= BIT(i);
        }

        if (!id_mask)
                id_mask = -ENXIO;

        return id_mask;
}

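/**
 * amdgpu_xcp_get_inst_details - Get the instance mask of an IP in a partition
 * @xcp: partition to query
 * @ip: IP block type
 * @inst_mask: filled with the mask of @ip instances owned by @xcp
 *
 * Return: 0 on success, -EINVAL if @xcp or its IP entry is invalid.
 */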
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
                                enum AMDGPU_XCP_IP_BLOCK ip,
                                uint32_t *inst_mask)
{
        if (!xcp->valid || !inst_mask || !xcp->ip[ip].valid)
                return -EINVAL;

        *inst_mask = xcp->ip[ip].inst_mask;

        return 0;
}

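/*
 * Register the per-partition DRM devices created by amdgpu_xcp_dev_alloc();
 * a no-op when the device has no XCP manager.
 */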
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
                            const struct pci_device_id *ent)
{
        int i, ret;

        if (!adev->xcp_mgr)
                return 0;

        for (i = 0; i < MAX_XCP; i++) {
                ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
                if (ret)
                        return ret;
        }

        return 0;
}

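/*
 * Unplug the per-partition DRM devices and restore the render/primary node
 * pointers, driver and VMA offset manager that amdgpu_xcp_dev_alloc() saved.
 */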
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
        struct drm_device *p_ddev;
        int i;

        if (!adev->xcp_mgr)
                return;

        for (i = 0; i < MAX_XCP; i++) {
                p_ddev = adev->xcp_mgr->xcp[i].ddev;
                drm_dev_unplug(p_ddev);
                p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
                p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
                p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
                p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
        }
}

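/*
 * Resolve which partition a newly opened render node belongs to and cache
 * the partition id and its memory partition id in the file private data.
 */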
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
                           struct amdgpu_fpriv *fpriv,
                           struct drm_file *file_priv)
{
        int i;

        if (!adev->xcp_mgr)
                return 0;

        fpriv->xcp_id = ~0;
        for (i = 0; i < MAX_XCP; ++i) {
                if (!adev->xcp_mgr->xcp[i].ddev)
                        break;

                if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
                        if (!adev->xcp_mgr->xcp[i].valid) {
                                dev_err(adev->dev, "renderD%d partition %d not valid!",
                                        file_priv->minor->index, i);
                                return -ENOENT;
                        }
                        dev_dbg(adev->dev, "renderD%d partition %d opened!",
                                file_priv->minor->index, i);
                        fpriv->xcp_id = i;
                        break;
                }
        }

        fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
                                adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
        return 0;
}

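/*
 * Drop the reference count held on the partition that owns the ring backing
 * @entity's scheduler, provided that scheduler is ready.
 */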
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
                              struct amdgpu_ctx_entity *entity)
{
        struct drm_gpu_scheduler *sched;
        struct amdgpu_ring *ring;

        if (!adev->xcp_mgr)
                return;

        sched = entity->entity.rq->sched;
        if (sched->ready) {
                ring = to_amdgpu_ring(sched);
                atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
        }
}