/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

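/*
 * KFD-to-KGD interface functions for GFX7 (CIK) hardware. amdkfd calls
 * back into amdgpu through the kfd2kgd_calls table defined below to
 * program compute queues, PASID/VMID mappings and the debugger's
 * address-watch and wave-control facilities on its behalf.
 */
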
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum {
        MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
        MAX_WATCH_ADDRESSES = 4
};

enum {
        ADDRESS_WATCH_REG_ADDR_HI = 0,
        ADDRESS_WATCH_REG_ADDR_LO,
        ADDRESS_WATCH_REG_CNTL,
        ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
        ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
        ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
        /* extend the mask to 26 bits to match the low address field */
        ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
        ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
        ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
        mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
        mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
        mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
        mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

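/*
 * Bit layout of the TCP_WATCHx_CNTL registers. Per the field names, the
 * 24-bit mask presumably selects which address bits take part in the
 * compare, vmid/atc/mode qualify the watch point, and valid arms it.
 */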
union TCP_WATCH_CNTL_BITS {
        struct {
                uint32_t mask:24;
                uint32_t vmid:4;
                uint32_t atc:1;
                uint32_t mode:2;
                uint32_t valid:1;
        } bitfields, bits;
        uint32_t u32All;
        signed int i32All;
        float f32All;
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
                uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                                                        uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

static const struct kfd2kgd_calls kfd2kgd = {
        .init_gtt_mem_allocation = alloc_gtt_mem,
        .free_gtt_mem = free_gtt_mem,
        .get_vmem_size = get_vmem_size,
        .get_gpu_clock_counter = get_gpu_clock_counter,
        .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_pipeline = kgd_init_pipeline,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
        .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
        .write_vmid_invalidate_request = write_vmid_invalidate_request,
        .get_fw_version = get_fw_version
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
        return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

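/*
 * SRBM_GFX_CNTL selects which ME/pipe/queue/VMID bank subsequent indexed
 * register accesses are routed to. srbm_mutex is held between lock_srbm()
 * and unlock_srbm() so concurrent callers cannot retarget the bank under
 * each other.
 */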
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /* ME 0 is the graphics CP, so the compute MECs start at ME 1 */
        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}

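/*
 * Program the per-VMID shader memory configuration (default and APE1
 * apertures) that waves launched under @vmid will use; the SRBM lock
 * selects which VMID's copy of the SH_MEM_* registers is written.
 */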
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished and the
         * SW cleared it. So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
        /* amdgpu owns the per-pipe state */
        return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}

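/*
 * Each SDMA RLC queue owns a private copy of the mmSDMA0_RLC0_* register
 * block; its base offset is derived from the engine and queue IDs stored
 * in the SDMA MQD.
 */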
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                        m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

        pr_debug("kfd: sdma base address: 0x%x\n", retval);

        return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
        return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
        return (struct cik_sdma_rlc_registers *)mqd;
}

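/*
 * Load a compute MQD into the HQD registers of the selected pipe/queue.
 * The write-pointer shadow is fetched from user space first so a restored
 * queue resumes where it stopped; if the fetch faults, the wptr already
 * in the MQD is committed unchanged.
 */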
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t wptr_shadow, is_wptr_shadow_valid;
        struct cik_mqd *m;

        m = get_mqd(mqd);

        is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
        if (is_wptr_shadow_valid)
                m->cp_hqd_pq_wptr = wptr_shadow;

        acquire_queue(kgd, pipe_id, queue_id);
        gfx_v7_0_mqd_commit(adev, m);
        release_queue(kgd);

        return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
                        m->sdma_rlc_virtual_addr);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
                        m->sdma_rlc_rb_base);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
                        m->sdma_rlc_doorbell);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                        m->sdma_rlc_rb_cntl);

        return 0;
}

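/*
 * A queue slot counts as occupied when the HQD is active and its ring
 * buffer base matches @queue_address; the CP_HQD_PQ_BASE registers hold
 * the base in units of 256 bytes, hence the >> 8.
 */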
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

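/*
 * Tear down an HQD: detach the doorbell, post a dequeue request of the
 * given type and poll CP_HQD_ACTIVE until the queue drains or the
 * caller-supplied timeout (in ms) expires.
 */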
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
        int timeout = utimeout;

        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);

        /* Wait for the ACTIVE bit to clear, i.e. for the queue to drain */
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (timeout <= 0) {
                        pr_err("kfd: cp queue preemption time out.\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                msleep(20);
                timeout -= 20;
        }

        release_queue(kgd);
        return 0;
}

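/*
 * Tear down an SDMA RLC queue: disable its ring buffer, wait for the
 * engine to report the context idle, then scrub the doorbell and ring
 * buffer registers.
 */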
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
        int timeout = utimeout;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (timeout <= 0)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
        }

        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

        return 0;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;
        unsigned int i;

        cntl.u32All = 0;

        cntl.bitfields.valid = 0;
        cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
        cntl.bitfields.atc = 1;

        /* Turn off all the watch points until all their registers are set */
        for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
                WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
                        ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}

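/*
 * Program a single address watch point. The valid bit is dropped while
 * the address registers are updated so the hardware never matches against
 * a half-written watch point; the caller's control value is then re-armed
 * with valid set.
 */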
static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;

        cntl.u32All = cntl_val;

        /* Turning off this watch point until we set all the registers */
        cntl.bitfields.valid = 0;
        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

        /* Enable the watch point */
        cntl.bitfields.valid = 1;

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        /* Restore the GRBM_GFX_INDEX register to broadcast mode */
        data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

        WREG32(mmGRBM_GFX_INDEX, data);

        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

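/*
 * The ATC_VMID<n>_PASID_MAPPING registers (one per VMID) report the PASID
 * currently bound to a VMID along with a valid bit; the two helpers below
 * expose those fields to amdkfd.
 */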
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
                                                        uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                                                                uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

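/*
 * Report the ucode version of the firmware loaded for @type, taken from
 * the common header carried by every amdgpu firmware image.
 */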
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        const union amdgpu_firmware_header *hdr;

        BUG_ON(kgd == NULL);

        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.pfp_fw->data;
                break;

        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.me_fw->data;
                break;

        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.ce_fw->data;
                break;

        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.mec_fw->data;
                break;

        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.mec2_fw->data;
                break;

        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->gfx.rlc_fw->data;
                break;

        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->sdma.instance[0].fw->data;
                break;

        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
                                                        adev->sdma.instance[1].fw->data;
                break;

        default:
                return 0;
        }

        if (hdr == NULL)
                return 0;

        /* Only 12 bits are in use */
        return hdr->common.ucode_version;
}