linux.git: drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25 #include "amdgpu.h"
26 #include "amdgpu_atomfirmware.h"
27 #include "gmc_v10_0.h"
28 #include "umc_v8_7.h"
29
30 #include "hdp/hdp_5_0_0_offset.h"
31 #include "hdp/hdp_5_0_0_sh_mask.h"
32 #include "athub/athub_2_0_0_sh_mask.h"
33 #include "athub/athub_2_0_0_offset.h"
34 #include "dcn/dcn_2_0_0_offset.h"
35 #include "dcn/dcn_2_0_0_sh_mask.h"
36 #include "oss/osssys_5_0_0_offset.h"
37 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
38 #include "navi10_enum.h"
39
40 #include "soc15.h"
41 #include "soc15d.h"
42 #include "soc15_common.h"
43
44 #include "nbio_v2_3.h"
45
46 #include "gfxhub_v2_0.h"
47 #include "gfxhub_v2_1.h"
48 #include "mmhub_v2_0.h"
49 #include "athub_v2_0.h"
50 #include "athub_v2_1.h"
51
52 #if 0
53 static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
54 {
55         /* TODO add golden setting for hdp */
56 };
57 #endif
58
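/*
 * Changing the ECC interrupt state requires no register programming here;
 * this stub only provides the .set callback expected by the IRQ framework.
 */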
59 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
60                                          struct amdgpu_irq_src *src,
61                                          unsigned type,
62                                          enum amdgpu_interrupt_state state)
63 {
64         return 0;
65 }
66
67 static int
68 gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
69                                    struct amdgpu_irq_src *src, unsigned type,
70                                    enum amdgpu_interrupt_state state)
71 {
72         switch (state) {
73         case AMDGPU_IRQ_STATE_DISABLE:
74                 /* MM HUB */
75                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
76                 /* GFX HUB */
77                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
78                 break;
79         case AMDGPU_IRQ_STATE_ENABLE:
80                 /* MM HUB */
81                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
82                 /* GFX HUB */
83                 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
84                 break;
85         default:
86                 break;
87         }
88
89         return 0;
90 }
91
92 static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
93                                        struct amdgpu_irq_src *source,
94                                        struct amdgpu_iv_entry *entry)
95 {
96         struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
97         uint32_t status = 0;
98         u64 addr;
99
100         addr = (u64)entry->src_data[0] << 12;
101         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
102
103         if (!amdgpu_sriov_vf(adev)) {
104                 /*
105                  * Issue a dummy read to wait for the status register to
106                  * be updated to avoid reading an incorrect value due to
107                  * the new fast GRBM interface.
108                  */
109                 if (entry->vmid_src == AMDGPU_GFXHUB_0)
110                         RREG32(hub->vm_l2_pro_fault_status);
111
112                 status = RREG32(hub->vm_l2_pro_fault_status);
113                 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
114         }
115
116         if (printk_ratelimit()) {
117                 struct amdgpu_task_info task_info;
118
119                 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
120                 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
121
122                 dev_err(adev->dev,
123                         "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
124                         "for process %s pid %d thread %s pid %d)\n",
125                         entry->vmid_src ? "mmhub" : "gfxhub",
126                         entry->src_id, entry->ring_id, entry->vmid,
127                         entry->pasid, task_info.process_name, task_info.tgid,
128                         task_info.task_name, task_info.pid);
129                 dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
130                         addr, entry->client_id);
131                 if (!amdgpu_sriov_vf(adev))
132                         hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
133         }
134
135         return 0;
136 }
137
138 static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
139         .set = gmc_v10_0_vm_fault_interrupt_state,
140         .process = gmc_v10_0_process_interrupt,
141 };
142
143 static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
144         .set = gmc_v10_0_ecc_interrupt_state,
145         .process = amdgpu_umc_process_ecc_irq,
146 };
147
148 static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
149 {
150         adev->gmc.vm_fault.num_types = 1;
151         adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
152
153         if (!amdgpu_sriov_vf(adev)) {
154                 adev->gmc.ecc_irq.num_types = 1;
155                 adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
156         }
157 }
158
159 /**
160  * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
161  *
162  * @adev: amdgpu_device pointer
163  * @vmhub: vmhub type
164  *
165  */
166 static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
167                                        uint32_t vmhub)
168 {
169         return ((vmhub == AMDGPU_MMHUB_0 ||
170                  vmhub == AMDGPU_MMHUB_1) &&
171                 (!amdgpu_sriov_vf(adev)));
172 }
173
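/*
 * Read the ATHUB VMID-to-PASID mapping register for @vmid, store the mapped
 * PASID in @p_pasid and return whether that mapping is marked valid.
 */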
174 static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
175                                         struct amdgpu_device *adev,
176                                         uint8_t vmid, uint16_t *p_pasid)
177 {
178         uint32_t value;
179
180         value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
181                      + vmid);
182         *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
183
184         return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
185 }
186
187 /*
188  * GART
189  * VMID 0 is the physical GPU addresses as used by the kernel.
190  * VMIDs 1-15 are used for userspace clients and are handled
191  * by the amdgpu vm/hsa code.
192  */
193
194 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
195                                    unsigned int vmhub, uint32_t flush_type)
196 {
197         bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
198         struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
199         u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
200         u32 tmp;
201         /* Use invalidation engine 17 for GART flushes */
202         const unsigned eng = 17;
203         unsigned int i;
204
205         spin_lock(&adev->gmc.invalidate_lock);
206         /*
207          * The GPUVM invalidate acknowledge state may be lost across a
208          * power-gating off cycle. Acquire the semaphore before the
209          * invalidation and release it afterwards to avoid entering the
210          * power-gated state in between and work around the issue.
211          */
212
213         /* TODO: Semaphore usage for GFXHUB still needs more debugging before it is enabled there as well. */
214         if (use_semaphore) {
215                 for (i = 0; i < adev->usec_timeout; i++) {
216                         /* a read return value of 1 means the semaphore was acquired */
217                         tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
218                                             hub->eng_distance * eng);
219                         if (tmp & 0x1)
220                                 break;
221                         udelay(1);
222                 }
223
224                 if (i >= adev->usec_timeout)
225                         DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
226         }
227
228         WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
229
230         /*
231          * Issue a dummy read to wait for the ACK register to be cleared
232          * to avoid a false ACK due to the new fast GRBM interface.
233          */
234         if (vmhub == AMDGPU_GFXHUB_0)
235                 RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
236
237         /* Wait for ACK with a delay.*/
238         for (i = 0; i < adev->usec_timeout; i++) {
239                 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
240                                     hub->eng_distance * eng);
241                 tmp &= 1 << vmid;
242                 if (tmp)
243                         break;
244
245                 udelay(1);
246         }
247
248         /* TODO: Semaphore usage for GFXHUB still needs more debugging before it is enabled there as well. */
249         if (use_semaphore)
250                 /*
251                  * Release the semaphore after the invalidation;
252                  * writing 0 releases it.
253                  */
254                 WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
255                               hub->eng_distance * eng, 0);
256
257         spin_unlock(&adev->gmc.invalidate_lock);
258
259         if (i < adev->usec_timeout)
260                 return;
261
262         DRM_ERROR("Timeout waiting for VM flush ACK!\n");
263 }
264
265 /**
266  * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
267  *
268  * @adev: amdgpu_device pointer
269  * @vmid: vm instance to flush
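 * @vmhub: which hub to flush
 * @flush_type: the type of flush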
270  *
271  * Flush the TLB for the requested page table.
272  */
273 static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
274                                         uint32_t vmhub, uint32_t flush_type)
275 {
276         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
277         struct dma_fence *fence;
278         struct amdgpu_job *job;
279
280         int r;
281
282         /* flush hdp cache */
283         adev->nbio.funcs->hdp_flush(adev, NULL);
284
285         /* During SRIOV run time, the driver shouldn't access registers through
286          * MMIO directly; use the KIQ to do the VM invalidation instead.
287          */
288         if (adev->gfx.kiq.ring.sched.ready &&
289             (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
290             down_read_trylock(&adev->reset_sem)) {
291                 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
292                 const unsigned eng = 17;
293                 u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
294                 u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
295                 u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
296
297                 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
298                                 1 << vmid);
299
300                 up_read(&adev->reset_sem);
301                 return;
302         }
303
304         mutex_lock(&adev->mman.gtt_window_lock);
305
306         if (vmhub == AMDGPU_MMHUB_0) {
307                 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
308                 mutex_unlock(&adev->mman.gtt_window_lock);
309                 return;
310         }
311
312         BUG_ON(vmhub != AMDGPU_GFXHUB_0);
313
314         if (!adev->mman.buffer_funcs_enabled ||
315             !adev->ib_pool_ready ||
316             amdgpu_in_reset(adev) ||
317             ring->sched.ready == false) {
318                 gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
319                 mutex_unlock(&adev->mman.gtt_window_lock);
320                 return;
321         }
322
323         /* The SDMA on Navi has a bug which can theoretically result in memory
324          * corruption if an invalidation happens at the same time as a VA
325          * translation. Avoid this by doing the invalidation from the SDMA
326          * itself.
327          */
328         r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
329                                      &job);
330         if (r)
331                 goto error_alloc;
332
333         job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
334         job->vm_needs_flush = true;
335         job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
336         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
337         r = amdgpu_job_submit(job, &adev->mman.entity,
338                               AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
339         if (r)
340                 goto error_submit;
341
342         mutex_unlock(&adev->mman.gtt_window_lock);
343
344         dma_fence_wait(fence, false);
345         dma_fence_put(fence);
346
347         return;
348
349 error_submit:
350         amdgpu_job_free(job);
351
352 error_alloc:
353         mutex_unlock(&adev->mman.gtt_window_lock);
354         DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
355 }
356
357 /**
358  * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
359  *
360  * @adev: amdgpu_device pointer
361  * @pasid: pasid to be flushed
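 * @flush_type: the type of flush
 * @all_hub: flush the TLBs on all hubs or only on the GFX hub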
362  *
363  * Flush the TLB for the requested pasid.
364  */
365 static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
366                                         uint16_t pasid, uint32_t flush_type,
367                                         bool all_hub)
368 {
369         int vmid, i;
370         signed long r;
371         uint32_t seq;
372         uint16_t queried_pasid;
373         bool ret;
374         struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
375         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
376
377         if (amdgpu_emu_mode == 0 && ring->sched.ready) {
378                 spin_lock(&adev->gfx.kiq.ring_lock);
379                 /* 2 dwords flush + 8 dwords fence */
380                 amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
381                 kiq->pmf->kiq_invalidate_tlbs(ring,
382                                         pasid, flush_type, all_hub);
383                 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
384                 if (r) {
385                         amdgpu_ring_undo(ring);
386                         spin_unlock(&adev->gfx.kiq.ring_lock);
387                         return -ETIME;
388                 }
389
390                 amdgpu_ring_commit(ring);
391                 spin_unlock(&adev->gfx.kiq.ring_lock);
392                 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
393                 if (r < 1) {
394                         dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
395                         return -ETIME;
396                 }
397
398                 return 0;
399         }
400
401         for (vmid = 1; vmid < 16; vmid++) {
402
403                 ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
404                                 &queried_pasid);
405                 if (ret && queried_pasid == pasid) {
406                         if (all_hub) {
407                                 for (i = 0; i < adev->num_vmhubs; i++)
408                                         gmc_v10_0_flush_gpu_tlb(adev, vmid,
409                                                         i, flush_type);
410                         } else {
411                                 gmc_v10_0_flush_gpu_tlb(adev, vmid,
412                                                 AMDGPU_GFXHUB_0, flush_type);
413                         }
414                         break;
415                 }
416         }
417
418         return 0;
419 }
420
421 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
422                                              unsigned vmid, uint64_t pd_addr)
423 {
424         bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
425         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
426         uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
427         unsigned eng = ring->vm_inv_eng;
428
429         /*
430          * The GPUVM invalidate acknowledge state may be lost across a
431          * power-gating off cycle. Acquire the semaphore before the
432          * invalidation and release it afterwards to avoid entering the
433          * power-gated state in between and work around the issue.
434          */
435
436         /* TODO: Semaphore usage for GFXHUB still needs more debugging before it is enabled there as well. */
437         if (use_semaphore)
438                 /* a read return value of 1 means the semaphore was acquired */
439                 amdgpu_ring_emit_reg_wait(ring,
440                                           hub->vm_inv_eng0_sem +
441                                           hub->eng_distance * eng, 0x1, 0x1);
442
443         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
444                               (hub->ctx_addr_distance * vmid),
445                               lower_32_bits(pd_addr));
446
447         amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
448                               (hub->ctx_addr_distance * vmid),
449                               upper_32_bits(pd_addr));
450
451         amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
452                                             hub->eng_distance * eng,
453                                             hub->vm_inv_eng0_ack +
454                                             hub->eng_distance * eng,
455                                             req, 1 << vmid);
456
457         /* TODO: Semaphore usage for GFXHUB still needs more debugging before it is enabled there as well. */
458         if (use_semaphore)
459                 /*
460                  * Release the semaphore after the invalidation;
461                  * writing 0 releases it.
462                  */
463                 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
464                                       hub->eng_distance * eng, 0);
465
466         return pd_addr;
467 }
468
469 static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
470                                          unsigned pasid)
471 {
472         struct amdgpu_device *adev = ring->adev;
473         uint32_t reg;
474
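
        /* Each hub has its own IH VMID-to-PASID LUT; pick the GFX or MM one. */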
475         if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
476                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
477         else
478                 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
479
480         amdgpu_ring_emit_wreg(ring, reg, pasid);
481 }
482
483 /*
484  * PTE format on NAVI 10:
485  * 63:59 reserved
486  * 58:57 reserved
487  * 56 F
488  * 55 L
489  * 54 reserved
490  * 53:52 SW
491  * 51 T
492  * 50:48 mtype
493  * 47:12 4k physical page base address
494  * 11:7 fragment
495  * 6 write
496  * 5 read
497  * 4 exe
498  * 3 Z
499  * 2 snooped
500  * 1 system
501  * 0 valid
502  *
503  * PDE format on NAVI 10:
504  * 63:59 block fragment size
505  * 58:55 reserved
506  * 54 P
507  * 53:48 reserved
508  * 47:6 physical base address of PD or PTE
509  * 5:3 reserved
510  * 2 C
511  * 1 system
512  * 0 valid
513  */
514
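/*
 * Translate AMDGPU_VM_MTYPE_* mapping flags into the NV10 PTE MTYPE field;
 * unrecognized values fall back to MTYPE_NC.
 */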
515 static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
516 {
517         switch (flags) {
518         case AMDGPU_VM_MTYPE_DEFAULT:
519                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
520         case AMDGPU_VM_MTYPE_NC:
521                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
522         case AMDGPU_VM_MTYPE_WC:
523                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
524         case AMDGPU_VM_MTYPE_CC:
525                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
526         case AMDGPU_VM_MTYPE_UC:
527                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
528         default:
529                 return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
530         }
531 }
532
533 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
534                                  uint64_t *addr, uint64_t *flags)
535 {
536         if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
537                 *addr = adev->vm_manager.vram_base_offset + *addr -
538                         adev->gmc.vram_start;
539         BUG_ON(*addr & 0xFFFF00000000003FULL);
540
541         if (!adev->gmc.translate_further)
542                 return;
543
544         if (level == AMDGPU_VM_PDB1) {
545                 /* Set the block fragment size */
546                 if (!(*flags & AMDGPU_PDE_PTE))
547                         *flags |= AMDGPU_PDE_BFS(0x9);
548
549         } else if (level == AMDGPU_VM_PDB0) {
550                 if (*flags & AMDGPU_PDE_PTE)
551                         *flags &= ~AMDGPU_PDE_PTE;
552                 else
553                         *flags |= AMDGPU_PTE_TF;
554         }
555 }
556
557 static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
558                                  struct amdgpu_bo_va_mapping *mapping,
559                                  uint64_t *flags)
560 {
561         *flags &= ~AMDGPU_PTE_EXECUTABLE;
562         *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
563
564         *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
565         *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
566
567         if (mapping->flags & AMDGPU_PTE_PRT) {
568                 *flags |= AMDGPU_PTE_PRT;
569                 *flags |= AMDGPU_PTE_SNOOPED;
570                 *flags |= AMDGPU_PTE_LOG;
571                 *flags |= AMDGPU_PTE_SYSTEM;
572                 *flags &= ~AMDGPU_PTE_VALID;
573         }
574 }
575
576 static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
577 {
578         u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
579         unsigned size;
580
581         if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
582                 size = AMDGPU_VBIOS_VGA_ALLOCATION;
583         } else {
584                 u32 viewport;
585                 u32 pitch;
586
587                 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
588                 pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
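                /* viewport height * pitch * 4 bytes per pixel (32bpp scanout assumed) */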
589                 size = (REG_GET_FIELD(viewport,
590                                         HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
591                                 REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
592                                 4);
593         }
594
595         return size;
596 }
597
598 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
599         .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
600         .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
601         .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
602         .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
603         .map_mtype = gmc_v10_0_map_mtype,
604         .get_vm_pde = gmc_v10_0_get_vm_pde,
605         .get_vm_pte = gmc_v10_0_get_vm_pte,
606         .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
607 };
608
609 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
610 {
611         if (adev->gmc.gmc_funcs == NULL)
612                 adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
613 }
614
615 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
616 {
617         switch (adev->asic_type) {
618         case CHIP_SIENNA_CICHLID:
619                 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
620                 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
621                 adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
622                 adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
623                 adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
624                 adev->umc.funcs = &umc_v8_7_funcs;
625                 break;
626         default:
627                 break;
628         }
629 }
630
631
632 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
633 {
634         adev->mmhub.funcs = &mmhub_v2_0_funcs;
635 }
636
637 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
638 {
639         switch (adev->asic_type) {
640         case CHIP_SIENNA_CICHLID:
641         case CHIP_NAVY_FLOUNDER:
642                 adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
643                 break;
644         default:
645                 adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
646                 break;
647         }
648 }
649
650
651 static int gmc_v10_0_early_init(void *handle)
652 {
653         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
654
655         gmc_v10_0_set_mmhub_funcs(adev);
656         gmc_v10_0_set_gfxhub_funcs(adev);
657         gmc_v10_0_set_gmc_funcs(adev);
658         gmc_v10_0_set_irq_funcs(adev);
659         gmc_v10_0_set_umc_funcs(adev);
660
661         adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
662         adev->gmc.shared_aperture_end =
663                 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
664         adev->gmc.private_aperture_start = 0x1000000000000000ULL;
665         adev->gmc.private_aperture_end =
666                 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
667
668         return 0;
669 }
670
671 static int gmc_v10_0_late_init(void *handle)
672 {
673         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
674         int r;
675
676         amdgpu_bo_late_init(adev);
677
678         r = amdgpu_gmc_allocate_vm_inv_eng(adev);
679         if (r)
680                 return r;
681
682         r = amdgpu_gmc_ras_late_init(adev);
683         if (r)
684                 return r;
685
686         return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
687 }
688
689 static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
690                                         struct amdgpu_gmc *mc)
691 {
692         u64 base = 0;
693
694         base = adev->gfxhub.funcs->get_fb_location(adev);
695
696         /* add the xgmi offset of the physical node */
697         base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
698
699         amdgpu_gmc_vram_location(adev, &adev->gmc, base);
700         amdgpu_gmc_gart_location(adev, mc);
701
702         /* base offset of vram pages */
703         adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
704
705         /* add the xgmi offset of the physical node */
706         adev->vm_manager.vram_base_offset +=
707                 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
708 }
709
710 /**
711  * gmc_v10_0_mc_init - initialize the memory controller driver params
712  *
713  * @adev: amdgpu_device pointer
714  *
715  * Look up the amount of vram, vram width, and decide how to place
716  * vram and gart within the GPU's physical address space.
717  * Returns 0 for success.
718  */
719 static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
720 {
721         int r;
722
723         /* get_memsize() reports the VRAM size in MB */
724         adev->gmc.mc_vram_size =
725                 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
726         adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
727
728         if (!(adev->flags & AMD_IS_APU)) {
729                 r = amdgpu_device_resize_fb_bar(adev);
730                 if (r)
731                         return r;
732         }
733         adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
734         adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
735
736         /* In case the PCI BAR is larger than the actual amount of vram */
737         adev->gmc.visible_vram_size = adev->gmc.aper_size;
738         if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
739                 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
740
741         /* set the gart size */
742         if (amdgpu_gart_size == -1) {
743                 switch (adev->asic_type) {
744                 case CHIP_NAVI10:
745                 case CHIP_NAVI14:
746                 case CHIP_NAVI12:
747                 case CHIP_SIENNA_CICHLID:
748                 case CHIP_NAVY_FLOUNDER:
749                 default:
750                         adev->gmc.gart_size = 512ULL << 20;
751                         break;
752                 }
753         } else
754                 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
755
756         gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
757
758         return 0;
759 }
760
761 static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
762 {
763         int r;
764
765         if (adev->gart.bo) {
766                 WARN(1, "NAVI10 PCIE GART already initialized\n");
767                 return 0;
768         }
769
770         /* Initialize common gart structure */
771         r = amdgpu_gart_init(adev);
772         if (r)
773                 return r;
774
775         adev->gart.table_size = adev->gart.num_gpu_pages * 8;
776         adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
777                                  AMDGPU_PTE_EXECUTABLE;
778
779         return amdgpu_gart_table_vram_alloc(adev);
780 }
781
782 static int gmc_v10_0_sw_init(void *handle)
783 {
784         int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
785         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
786
787         adev->gfxhub.funcs->init(adev);
788
789         adev->mmhub.funcs->init(adev);
790
791         spin_lock_init(&adev->gmc.invalidate_lock);
792
793         if (adev->asic_type == CHIP_SIENNA_CICHLID && amdgpu_emu_mode == 1) {
794                 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
795                 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
796         } else {
797                 r = amdgpu_atomfirmware_get_vram_info(adev,
798                                 &vram_width, &vram_type, &vram_vendor);
799                 adev->gmc.vram_width = vram_width;
800
801                 adev->gmc.vram_type = vram_type;
802                 adev->gmc.vram_vendor = vram_vendor;
803         }
804
805         switch (adev->asic_type) {
806         case CHIP_NAVI10:
807         case CHIP_NAVI14:
808         case CHIP_NAVI12:
809         case CHIP_SIENNA_CICHLID:
810         case CHIP_NAVY_FLOUNDER:
811                 adev->num_vmhubs = 2;
812                 /*
813                  * To support 4-level page tables:
814                  * vm size is 256TB (48 bit), the maximum for Navi10/Navi14/Navi12,
815                  * block size is 512 (9 bit)
816                  */
817                 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
818                 break;
819         default:
820                 break;
821         }
822
823         /* This interrupt is VMC page fault.*/
824         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
825                               VMC_1_0__SRCID__VM_FAULT,
826                               &adev->gmc.vm_fault);
827
828         if (r)
829                 return r;
830
831         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
832                               UTCL2_1_0__SRCID__FAULT,
833                               &adev->gmc.vm_fault);
834         if (r)
835                 return r;
836
837         if (!amdgpu_sriov_vf(adev)) {
838                 /* interrupt sent to DF. */
839                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
840                                       &adev->gmc.ecc_irq);
841                 if (r)
842                         return r;
843         }
844
845         /*
846          * Set the internal MC address mask. This is the max address of the GPU's
847          * internal address space.
848          */
849         adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
850
851         r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
852         if (r) {
853                 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
854                 return r;
855         }
856
857         if (adev->gmc.xgmi.supported) {
858                 r = adev->gfxhub.funcs->get_xgmi_info(adev);
859                 if (r)
860                         return r;
861         }
862
863         r = gmc_v10_0_mc_init(adev);
864         if (r)
865                 return r;
866
867         amdgpu_gmc_get_vbios_allocations(adev);
868
869         /* Memory manager */
870         r = amdgpu_bo_init(adev);
871         if (r)
872                 return r;
873
874         r = gmc_v10_0_gart_init(adev);
875         if (r)
876                 return r;
877
878         /*
879          * number of VMs
880          * VMID 0 is reserved for System
881          * amdgpu graphics/compute will use VMIDs 1-7
882          * amdkfd will use VMIDs 8-15
883          */
884         adev->vm_manager.first_kfd_vmid = 8;
885
886         amdgpu_vm_manager_init(adev);
887
888         return 0;
889 }
890
891 /**
892  * gmc_v10_0_gart_fini - vm fini callback
893  *
894  * @adev: amdgpu_device pointer
895  *
896  * Tears down the driver GART/VM setup.
897  */
898 static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
899 {
900         amdgpu_gart_table_vram_free(adev);
901         amdgpu_gart_fini(adev);
902 }
903
904 static int gmc_v10_0_sw_fini(void *handle)
905 {
906         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
907
908         amdgpu_vm_manager_fini(adev);
909         gmc_v10_0_gart_fini(adev);
910         amdgpu_gem_force_release(adev);
911         amdgpu_bo_fini(adev);
912
913         return 0;
914 }
915
916 static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
917 {
918         switch (adev->asic_type) {
919         case CHIP_NAVI10:
920         case CHIP_NAVI14:
921         case CHIP_NAVI12:
922         case CHIP_SIENNA_CICHLID:
923         case CHIP_NAVY_FLOUNDER:
924                 break;
925         default:
926                 break;
927         }
928 }
929
930 /**
931  * gmc_v10_0_gart_enable - gart enable
932  *
933  * @adev: amdgpu_device pointer
934  */
935 static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
936 {
937         int r;
938         bool value;
939         u32 tmp;
940
941         if (adev->gart.bo == NULL) {
942                 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
943                 return -EINVAL;
944         }
945
946         r = amdgpu_gart_table_vram_pin(adev);
947         if (r)
948                 return r;
949
950         r = adev->gfxhub.funcs->gart_enable(adev);
951         if (r)
952                 return r;
953
954         r = adev->mmhub.funcs->gart_enable(adev);
955         if (r)
956                 return r;
957
958         tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
959         tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
960         WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
961
962         tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
963         WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
964
965         /* Flush HDP after it is initialized */
966         adev->nbio.funcs->hdp_flush(adev, NULL);
967
968         value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
969                 false : true;
970
971         adev->gfxhub.funcs->set_fault_enable_default(adev, value);
972         adev->mmhub.funcs->set_fault_enable_default(adev, value);
973         gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
974         gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
975
976         DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
977                  (unsigned)(adev->gmc.gart_size >> 20),
978                  (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
979
980         adev->gart.ready = true;
981
982         return 0;
983 }
984
985 static int gmc_v10_0_hw_init(void *handle)
986 {
987         int r;
988         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
989
990         /* The sequence of these two function calls matters.*/
991         gmc_v10_0_init_golden_registers(adev);
992
993         r = gmc_v10_0_gart_enable(adev);
994         if (r)
995                 return r;
996
997         if (adev->umc.funcs && adev->umc.funcs->init_registers)
998                 adev->umc.funcs->init_registers(adev);
999
1000         return 0;
1001 }
1002
1003 /**
1004  * gmc_v10_0_gart_disable - gart disable
1005  *
1006  * @adev: amdgpu_device pointer
1007  *
1008  * This disables all VM page table.
1009  */
1010 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
1011 {
1012         adev->gfxhub.funcs->gart_disable(adev);
1013         adev->mmhub.funcs->gart_disable(adev);
1014         amdgpu_gart_table_vram_unpin(adev);
1015 }
1016
1017 static int gmc_v10_0_hw_fini(void *handle)
1018 {
1019         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1020
1021         if (amdgpu_sriov_vf(adev)) {
1022                 /* full access mode, so don't touch any GMC register */
1023                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1024                 return 0;
1025         }
1026
1027         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1028         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1029         gmc_v10_0_gart_disable(adev);
1030
1031         return 0;
1032 }
1033
1034 static int gmc_v10_0_suspend(void *handle)
1035 {
1036         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1037
1038         gmc_v10_0_hw_fini(adev);
1039
1040         return 0;
1041 }
1042
1043 static int gmc_v10_0_resume(void *handle)
1044 {
1045         int r;
1046         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1047
1048         r = gmc_v10_0_hw_init(adev);
1049         if (r)
1050                 return r;
1051
1052         amdgpu_vmid_reset_all(adev);
1053
1054         return 0;
1055 }
1056
1057 static bool gmc_v10_0_is_idle(void *handle)
1058 {
1059         /* MC is always ready in GMC v10.*/
1060         return true;
1061 }
1062
1063 static int gmc_v10_0_wait_for_idle(void *handle)
1064 {
1065         /* There is no need to wait for MC idle in GMC v10.*/
1066         return 0;
1067 }
1068
1069 static int gmc_v10_0_soft_reset(void *handle)
1070 {
1071         return 0;
1072 }
1073
1074 static int gmc_v10_0_set_clockgating_state(void *handle,
1075                                            enum amd_clockgating_state state)
1076 {
1077         int r;
1078         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1079
1080         r = adev->mmhub.funcs->set_clockgating(adev, state);
1081         if (r)
1082                 return r;
1083
1084         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1085             adev->asic_type == CHIP_NAVY_FLOUNDER)
1086                 return athub_v2_1_set_clockgating(adev, state);
1087         else
1088                 return athub_v2_0_set_clockgating(adev, state);
1089 }
1090
1091 static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
1092 {
1093         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1094
1095         adev->mmhub.funcs->get_clockgating(adev, flags);
1096
1097         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
1098             adev->asic_type == CHIP_NAVY_FLOUNDER)
1099                 athub_v2_1_get_clockgating(adev, flags);
1100         else
1101                 athub_v2_0_get_clockgating(adev, flags);
1102 }
1103
1104 static int gmc_v10_0_set_powergating_state(void *handle,
1105                                            enum amd_powergating_state state)
1106 {
1107         return 0;
1108 }
1109
1110 const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
1111         .name = "gmc_v10_0",
1112         .early_init = gmc_v10_0_early_init,
1113         .late_init = gmc_v10_0_late_init,
1114         .sw_init = gmc_v10_0_sw_init,
1115         .sw_fini = gmc_v10_0_sw_fini,
1116         .hw_init = gmc_v10_0_hw_init,
1117         .hw_fini = gmc_v10_0_hw_fini,
1118         .suspend = gmc_v10_0_suspend,
1119         .resume = gmc_v10_0_resume,
1120         .is_idle = gmc_v10_0_is_idle,
1121         .wait_for_idle = gmc_v10_0_wait_for_idle,
1122         .soft_reset = gmc_v10_0_soft_reset,
1123         .set_clockgating_state = gmc_v10_0_set_clockgating_state,
1124         .set_powergating_state = gmc_v10_0_set_powergating_state,
1125         .get_clockgating_state = gmc_v10_0_get_clockgating_state,
1126 };
1127
1128 const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
1129 {
1130         .type = AMD_IP_BLOCK_TYPE_GMC,
1131         .major = 10,
1132         .minor = 0,
1133         .rev = 0,
1134         .funcs = &gmc_v10_0_ip_funcs,
1135 };