2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
27 #include <drm/drm_cache.h>
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
34 #include "gc/gc_9_0_sh_mask.h"
35 #include "dce/dce_12_0_offset.h"
36 #include "dce/dce_12_0_sh_mask.h"
37 #include "vega10_enum.h"
38 #include "mmhub/mmhub_1_0_offset.h"
39 #include "athub/athub_1_0_sh_mask.h"
40 #include "athub/athub_1_0_offset.h"
41 #include "oss/osssys_4_0_offset.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "mmhub_v9_4.h"
56 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_xgmi.h"
61 /* add these here since we already include dce12 headers and these are for DCN */
62 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
63 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
64 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
65 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
66 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
67 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
68 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
69 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
72 static const char *gfxhub_client_ids[] = {
88 static const char *mmhub_client_ids_raven[][2] = {
113 static const char *mmhub_client_ids_renoir[][2] = {
141 static const char *mmhub_client_ids_vega10[][2] = {
154 [32+14][0] = "SDMA0",
167 [32+4][1] = "DCEDWB",
170 [32+14][1] = "SDMA1",
173 static const char *mmhub_client_ids_vega12[][2] = {
186 [32+15][0] = "SDMA0",
196 [32+1][1] = "DCEDWB",
202 [32+15][1] = "SDMA1",
205 static const char *mmhub_client_ids_vega20[][2] = {
219 [32+12][0] = "UTCL2",
220 [32+14][0] = "SDMA1",
238 [32+14][1] = "SDMA1",
241 static const char *mmhub_client_ids_arcturus[][2] = {
282 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
284 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
285 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
288 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
290 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
291 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
294 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
295 (0x000143c0 + 0x00000000),
296 (0x000143c0 + 0x00000800),
297 (0x000143c0 + 0x00001000),
298 (0x000143c0 + 0x00001800),
299 (0x000543c0 + 0x00000000),
300 (0x000543c0 + 0x00000800),
301 (0x000543c0 + 0x00001000),
302 (0x000543c0 + 0x00001800),
303 (0x000943c0 + 0x00000000),
304 (0x000943c0 + 0x00000800),
305 (0x000943c0 + 0x00001000),
306 (0x000943c0 + 0x00001800),
307 (0x000d43c0 + 0x00000000),
308 (0x000d43c0 + 0x00000800),
309 (0x000d43c0 + 0x00001000),
310 (0x000d43c0 + 0x00001800),
311 (0x001143c0 + 0x00000000),
312 (0x001143c0 + 0x00000800),
313 (0x001143c0 + 0x00001000),
314 (0x001143c0 + 0x00001800),
315 (0x001543c0 + 0x00000000),
316 (0x001543c0 + 0x00000800),
317 (0x001543c0 + 0x00001000),
318 (0x001543c0 + 0x00001800),
319 (0x001943c0 + 0x00000000),
320 (0x001943c0 + 0x00000800),
321 (0x001943c0 + 0x00001000),
322 (0x001943c0 + 0x00001800),
323 (0x001d43c0 + 0x00000000),
324 (0x001d43c0 + 0x00000800),
325 (0x001d43c0 + 0x00001000),
326 (0x001d43c0 + 0x00001800),
329 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
330 (0x000143e0 + 0x00000000),
331 (0x000143e0 + 0x00000800),
332 (0x000143e0 + 0x00001000),
333 (0x000143e0 + 0x00001800),
334 (0x000543e0 + 0x00000000),
335 (0x000543e0 + 0x00000800),
336 (0x000543e0 + 0x00001000),
337 (0x000543e0 + 0x00001800),
338 (0x000943e0 + 0x00000000),
339 (0x000943e0 + 0x00000800),
340 (0x000943e0 + 0x00001000),
341 (0x000943e0 + 0x00001800),
342 (0x000d43e0 + 0x00000000),
343 (0x000d43e0 + 0x00000800),
344 (0x000d43e0 + 0x00001000),
345 (0x000d43e0 + 0x00001800),
346 (0x001143e0 + 0x00000000),
347 (0x001143e0 + 0x00000800),
348 (0x001143e0 + 0x00001000),
349 (0x001143e0 + 0x00001800),
350 (0x001543e0 + 0x00000000),
351 (0x001543e0 + 0x00000800),
352 (0x001543e0 + 0x00001000),
353 (0x001543e0 + 0x00001800),
354 (0x001943e0 + 0x00000000),
355 (0x001943e0 + 0x00000800),
356 (0x001943e0 + 0x00001000),
357 (0x001943e0 + 0x00001800),
358 (0x001d43e0 + 0x00000000),
359 (0x001d43e0 + 0x00000800),
360 (0x001d43e0 + 0x00001000),
361 (0x001d43e0 + 0x00001800),
364 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
365 struct amdgpu_irq_src *src,
367 enum amdgpu_interrupt_state state)
369 u32 bits, i, tmp, reg;
371 /* Devices newer than VEGA10/12 shall have these programming
372 sequences performed by PSP BL */
373 if (adev->asic_type >= CHIP_VEGA20)
379 case AMDGPU_IRQ_STATE_DISABLE:
380 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
381 reg = ecc_umc_mcumc_ctrl_addrs[i];
386 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
387 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
393 case AMDGPU_IRQ_STATE_ENABLE:
394 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
395 reg = ecc_umc_mcumc_ctrl_addrs[i];
400 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
401 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
414 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
415 struct amdgpu_irq_src *src,
417 enum amdgpu_interrupt_state state)
419 struct amdgpu_vmhub *hub;
420 u32 tmp, reg, bits, i, j;
422 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
423 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
424 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
425 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
426 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
427 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
428 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
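/* The switch below sets or clears these fault-interrupt enables in VM_CONTEXT0_CNTL..VM_CONTEXT15_CNTL on every vmhub. */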
431 case AMDGPU_IRQ_STATE_DISABLE:
432 for (j = 0; j < adev->num_vmhubs; j++) {
433 hub = &adev->vmhub[j];
434 for (i = 0; i < 16; i++) {
435 reg = hub->vm_context0_cntl + i;
442 case AMDGPU_IRQ_STATE_ENABLE:
443 for (j = 0; j < adev->num_vmhubs; j++) {
444 hub = &adev->vmhub[j];
445 for (i = 0; i < 16; i++) {
446 reg = hub->vm_context0_cntl + i;
460 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
461 struct amdgpu_irq_src *source,
462 struct amdgpu_iv_entry *entry)
464 bool retry_fault = !!(entry->src_data[1] & 0x80);
465 uint32_t status = 0, cid = 0, rw = 0;
466 struct amdgpu_task_info task_info;
467 struct amdgpu_vmhub *hub;
468 const char *mmhub_cid;
469 const char *hub_name;
472 addr = (u64)entry->src_data[0] << 12;
473 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
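/* addr now holds the faulting page address: src_data[0] carries bits 43:12 and the low nibble of src_data[1] carries bits 47:44. */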
476 /* Returning 1 here also prevents sending the IV to the KFD */
478 /* Process it only if it's the first fault for this address */
479 if (entry->ih != &adev->irq.ih_soft &&
480 amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
484 /* Delegate it to a different ring if the hardware hasn't
487 if (in_interrupt()) {
488 amdgpu_irq_delegate(adev, entry, 8);
492 /* Try to handle the recoverable page faults by filling page
495 if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
499 if (!printk_ratelimit())
502 if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
504 hub = &adev->vmhub[AMDGPU_MMHUB_0];
505 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
507 hub = &adev->vmhub[AMDGPU_MMHUB_1];
509 hub_name = "gfxhub0";
510 hub = &adev->vmhub[AMDGPU_GFXHUB_0];
513 memset(&task_info, 0, sizeof(struct amdgpu_task_info));
514 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
517 "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
518 "pasid:%u, for process %s pid %d thread %s pid %d)\n",
519 hub_name, retry_fault ? "retry" : "no-retry",
520 entry->src_id, entry->ring_id, entry->vmid,
521 entry->pasid, task_info.process_name, task_info.tgid,
522 task_info.task_name, task_info.pid);
523 dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
524 addr, entry->client_id);
526 if (amdgpu_sriov_vf(adev))
530 * Issue a dummy read to wait for the status register to
531 * be updated to avoid reading an incorrect value due to
532 * the new fast GRBM interface.
534 if (entry->vmid_src == AMDGPU_GFXHUB_0)
535 RREG32(hub->vm_l2_pro_fault_status);
537 status = RREG32(hub->vm_l2_pro_fault_status);
538 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
539 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
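/* Clear the latched fault status (bit 0, CLEAR_PROTECTION_FAULT_STATUS_ADDR) so the next fault can be captured. */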
540 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
544 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
546 if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
547 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
548 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
549 gfxhub_client_ids[cid],
552 switch (adev->asic_type) {
554 mmhub_cid = mmhub_client_ids_vega10[cid][rw];
557 mmhub_cid = mmhub_client_ids_vega12[cid][rw];
560 mmhub_cid = mmhub_client_ids_vega20[cid][rw];
563 mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
566 mmhub_cid = mmhub_client_ids_raven[cid][rw];
569 mmhub_cid = mmhub_client_ids_renoir[cid][rw];
575 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
576 mmhub_cid ? mmhub_cid : "unknown", cid);
578 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
579 REG_GET_FIELD(status,
580 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
581 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
582 REG_GET_FIELD(status,
583 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
584 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
585 REG_GET_FIELD(status,
586 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
587 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
588 REG_GET_FIELD(status,
589 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
590 dev_err(adev->dev, "\t RW: 0x%x\n", rw);
594 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
595 .set = gmc_v9_0_vm_fault_interrupt_state,
596 .process = gmc_v9_0_process_interrupt,
600 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
601 .set = gmc_v9_0_ecc_interrupt_state,
602 .process = amdgpu_umc_process_ecc_irq,
605 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
607 adev->gmc.vm_fault.num_types = 1;
608 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
610 if (!amdgpu_sriov_vf(adev)) {
611 adev->gmc.ecc_irq.num_types = 1;
612 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
616 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
621 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
622 PER_VMID_INVALIDATE_REQ, 1 << vmid);
623 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
624 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
625 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
626 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
627 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
628 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
629 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
630 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
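/* Callers write the assembled value to VM_INVALIDATE_ENG*_REQ to kick off the invalidation. */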
636 * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
638 * @adev: amdgpu_device pointer
642 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
645 return ((vmhub == AMDGPU_MMHUB_0 ||
646 vmhub == AMDGPU_MMHUB_1) &&
647 (!amdgpu_sriov_vf(adev)) &&
648 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
649 (adev->apu_flags & AMD_APU_IS_PICASSO))));
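/* i.e. use the semaphore only for the MMHUBs on bare metal, and skip it on Picasso APUs that do not carry the RAVEN2 flag. */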
652 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
653 uint8_t vmid, uint16_t *p_pasid)
657 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
659 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
661 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
666 * VMID 0 is used for the physical GPU addresses used by the kernel.
667 * VMIDs 1-15 are used for userspace clients and are handled
668 * by the amdgpu vm/hsa code.
672 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
674 * @adev: amdgpu_device pointer
675 * @vmid: vm instance to flush
676 * @vmhub: which hub to flush
677 * @flush_type: the flush type
679 * Flush the TLB for the requested page table using the given flush type.
681 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
682 uint32_t vmhub, uint32_t flush_type)
684 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
685 const unsigned eng = 17;
686 u32 j, inv_req, inv_req2, tmp;
687 struct amdgpu_vmhub *hub;
689 BUG_ON(vmhub >= adev->num_vmhubs);
691 hub = &adev->vmhub[vmhub];
692 if (adev->gmc.xgmi.num_physical_nodes &&
693 adev->asic_type == CHIP_VEGA20) {
694 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
695 * heavy-weight TLB flush (type 2), which flushes
696 * both. Due to a race condition with concurrent
697 * memory accesses using the same TLB cache line, we
698 * still need a second TLB flush after this.
700 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
701 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
703 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
707 /* This is necessary for a HW workaround under SRIOV as well
708 * as GFXOFF under bare metal
710 if (adev->gfx.kiq.ring.sched.ready &&
711 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
712 down_read_trylock(&adev->reset_sem)) {
713 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
714 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
716 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
718 up_read(&adev->reset_sem);
722 spin_lock(&adev->gmc.invalidate_lock);
725 * The GPUVM invalidate acknowledge state may be lost across a power-gating
726 * off cycle, so add a semaphore acquire before invalidation and a semaphore
727 * release after invalidation to avoid entering a power-gated state
731 /* TODO: Keep debugging whether the semaphore can be used for GFXHUB as well. */
733 for (j = 0; j < adev->usec_timeout; j++) {
734 /* a read return value of 1 means the semaphore was acquired */
735 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
736 hub->eng_distance * eng);
742 if (j >= adev->usec_timeout)
743 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
747 WREG32_NO_KIQ(hub->vm_inv_eng0_req +
748 hub->eng_distance * eng, inv_req);
751 * Issue a dummy read to wait for the ACK register to
752 * be cleared to avoid a false ACK due to the new fast
755 if (vmhub == AMDGPU_GFXHUB_0)
756 RREG32_NO_KIQ(hub->vm_inv_eng0_req +
757 hub->eng_distance * eng);
759 for (j = 0; j < adev->usec_timeout; j++) {
760 tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
761 hub->eng_distance * eng);
762 if (tmp & (1 << vmid))
771 /* TODO: Keep debugging whether the semaphore can be used for GFXHUB as well. */
774 * Add a semaphore release after the invalidation;
775 * writing 0 releases the semaphore
777 WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
778 hub->eng_distance * eng, 0);
780 spin_unlock(&adev->gmc.invalidate_lock);
782 if (j < adev->usec_timeout)
785 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
789 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
791 * @adev: amdgpu_device pointer
792 * @pasid: pasid to be flushed
793 * @flush_type: the flush type
794 * @all_hub: flush all hubs
796 * Flush the TLB for the requested pasid.
798 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
799 uint16_t pasid, uint32_t flush_type,
805 uint16_t queried_pasid;
807 struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
808 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
810 if (amdgpu_in_reset(adev))
813 if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
814 /* Vega20+XGMI caches PTEs in TC and TLB. Add a
815 * heavy-weight TLB flush (type 2), which flushes
816 * both. Due to a race condition with concurrent
817 * memory accesses using the same TLB cache line, we
818 * still need a second TLB flush after this.
820 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
821 adev->asic_type == CHIP_VEGA20);
822 /* 2 dwords flush + 8 dwords fence */
823 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
826 ndw += kiq->pmf->invalidate_tlbs_size;
828 spin_lock(&adev->gfx.kiq.ring_lock);
829 /* 2 dwords flush + 8 dwords fence */
830 amdgpu_ring_alloc(ring, ndw);
832 kiq->pmf->kiq_invalidate_tlbs(ring,
834 kiq->pmf->kiq_invalidate_tlbs(ring,
835 pasid, flush_type, all_hub);
836 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
838 amdgpu_ring_undo(ring);
839 spin_unlock(&adev->gfx.kiq.ring_lock);
840 up_read(&adev->reset_sem);
844 amdgpu_ring_commit(ring);
845 spin_unlock(&adev->gfx.kiq.ring_lock);
846 r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
848 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
849 up_read(&adev->reset_sem);
852 up_read(&adev->reset_sem);
856 for (vmid = 1; vmid < 16; vmid++) {
858 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
860 if (ret && queried_pasid == pasid) {
862 for (i = 0; i < adev->num_vmhubs; i++)
863 gmc_v9_0_flush_gpu_tlb(adev, vmid,
866 gmc_v9_0_flush_gpu_tlb(adev, vmid,
867 AMDGPU_GFXHUB_0, flush_type);
877 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
878 unsigned vmid, uint64_t pd_addr)
880 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
881 struct amdgpu_device *adev = ring->adev;
882 struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
883 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
884 unsigned eng = ring->vm_inv_eng;
887 * The GPUVM invalidate acknowledge state may be lost across a power-gating
888 * off cycle, so add a semaphore acquire before invalidation and a semaphore
889 * release after invalidation to avoid entering a power-gated state
893 /* TODO: Keep debugging whether the semaphore can be used for GFXHUB as well. */
895 /* a read return value of 1 means the semaphore was acquired */
896 amdgpu_ring_emit_reg_wait(ring,
897 hub->vm_inv_eng0_sem +
898 hub->eng_distance * eng, 0x1, 0x1);
900 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
901 (hub->ctx_addr_distance * vmid),
902 lower_32_bits(pd_addr));
904 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
905 (hub->ctx_addr_distance * vmid),
906 upper_32_bits(pd_addr));
908 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
909 hub->eng_distance * eng,
910 hub->vm_inv_eng0_ack +
911 hub->eng_distance * eng,
914 /* TODO: Keep debugging whether the semaphore can be used for GFXHUB as well. */
917 * Add a semaphore release after the invalidation;
918 * writing 0 releases the semaphore
920 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
921 hub->eng_distance * eng, 0);
926 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
929 struct amdgpu_device *adev = ring->adev;
932 /* Do nothing because there's no lut register for mmhub1. */
933 if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
936 if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
937 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
939 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
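/* Record the VMID-to-PASID mapping in the IH LUT so interrupt entries carry the right PASID for this VMID. */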
941 amdgpu_ring_emit_wreg(ring, reg, pasid);
945 * PTE format on VEGA 10:
954 * 47:12 4k physical page base address
964 * PDE format on VEGA 10:
965 * 63:59 block fragment size
969 * 47:6 physical base address of PD or PTE
976 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
980 case AMDGPU_VM_MTYPE_DEFAULT:
981 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
982 case AMDGPU_VM_MTYPE_NC:
983 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
984 case AMDGPU_VM_MTYPE_WC:
985 return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
986 case AMDGPU_VM_MTYPE_RW:
987 return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
988 case AMDGPU_VM_MTYPE_CC:
989 return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
990 case AMDGPU_VM_MTYPE_UC:
991 return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
993 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
997 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
998 uint64_t *addr, uint64_t *flags)
1000 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1001 *addr = adev->vm_manager.vram_base_offset + *addr -
1002 adev->gmc.vram_start;
1003 BUG_ON(*addr & 0xFFFF00000000003FULL);
1005 if (!adev->gmc.translate_further)
1008 if (level == AMDGPU_VM_PDB1) {
1009 /* Set the block fragment size */
1010 if (!(*flags & AMDGPU_PDE_PTE))
1011 *flags |= AMDGPU_PDE_BFS(0x9);
1013 } else if (level == AMDGPU_VM_PDB0) {
1014 if (*flags & AMDGPU_PDE_PTE)
1015 *flags &= ~AMDGPU_PDE_PTE;
1017 *flags |= AMDGPU_PTE_TF;
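/* With translate_further, a PDB0 entry either maps memory directly or, via the translate-further (TF) bit, points at one more level of PTEs. */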
1021 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1022 struct amdgpu_bo_va_mapping *mapping,
1025 *flags &= ~AMDGPU_PTE_EXECUTABLE;
1026 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1028 *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1029 *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1031 if (mapping->flags & AMDGPU_PTE_PRT) {
1032 *flags |= AMDGPU_PTE_PRT;
1033 *flags &= ~AMDGPU_PTE_VALID;
1036 if (adev->asic_type == CHIP_ARCTURUS &&
1037 !(*flags & AMDGPU_PTE_SYSTEM) &&
1038 mapping->bo_va->is_xgmi)
1039 *flags |= AMDGPU_PTE_SNOOPED;
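/* Remote VRAM reached over XGMI on Arcturus is mapped snooped, presumably to keep it coherent with the local caches. */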
1042 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1044 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1047 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1048 size = AMDGPU_VBIOS_VGA_ALLOCATION;
1052 switch (adev->asic_type) {
1055 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1056 size = (REG_GET_FIELD(viewport,
1057 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1058 REG_GET_FIELD(viewport,
1059 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1066 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1067 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1068 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1077 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1078 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1079 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1080 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1081 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1082 .map_mtype = gmc_v9_0_map_mtype,
1083 .get_vm_pde = gmc_v9_0_get_vm_pde,
1084 .get_vm_pte = gmc_v9_0_get_vm_pte,
1085 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1088 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1090 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1093 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1095 switch (adev->asic_type) {
1097 adev->umc.funcs = &umc_v6_0_funcs;
1100 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1101 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1102 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1103 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1104 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1105 adev->umc.funcs = &umc_v6_1_funcs;
1108 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1109 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1110 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1111 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1112 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1113 adev->umc.funcs = &umc_v6_1_funcs;
1120 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1122 switch (adev->asic_type) {
1124 adev->mmhub.funcs = &mmhub_v9_4_funcs;
1127 adev->mmhub.funcs = &mmhub_v1_0_funcs;
1132 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1134 adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1137 static int gmc_v9_0_early_init(void *handle)
1139 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1141 gmc_v9_0_set_gmc_funcs(adev);
1142 gmc_v9_0_set_irq_funcs(adev);
1143 gmc_v9_0_set_umc_funcs(adev);
1144 gmc_v9_0_set_mmhub_funcs(adev);
1145 gmc_v9_0_set_gfxhub_funcs(adev);
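/* The shared and private apertures below are the 4 GB flat-address windows (assumed to back shader LDS and scratch accesses). */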
1147 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1148 adev->gmc.shared_aperture_end =
1149 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1150 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1151 adev->gmc.private_aperture_end =
1152 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1157 static int gmc_v9_0_late_init(void *handle)
1159 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1162 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1167 * Workaround for a performance drop issue when the VBIOS enables partial
1168 * writes while disabling HBM ECC for Vega10.
1170 if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
1171 if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1172 if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
1173 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1177 if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
1178 adev->mmhub.funcs->reset_ras_error_count(adev);
1180 r = amdgpu_gmc_ras_late_init(adev);
1184 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1187 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1188 struct amdgpu_gmc *mc)
1192 if (!amdgpu_sriov_vf(adev))
1193 base = adev->mmhub.funcs->get_fb_location(adev);
1195 /* add the xgmi offset of the physical node */
1196 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1197 amdgpu_gmc_vram_location(adev, mc, base);
1198 amdgpu_gmc_gart_location(adev, mc);
1199 amdgpu_gmc_agp_location(adev, mc);
1200 /* base offset of vram pages */
1201 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1203 /* XXX: add the xgmi offset of the physical node? */
1204 adev->vm_manager.vram_base_offset +=
1205 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1209 * gmc_v9_0_mc_init - initialize the memory controller driver params
1211 * @adev: amdgpu_device pointer
1213 * Look up the amount of vram, vram width, and decide how to place
1214 * vram and gart within the GPU's physical address space.
1215 * Returns 0 for success.
1217 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1221 /* size in MB */
1222 adev->gmc.mc_vram_size =
1223 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1224 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1226 if (!(adev->flags & AMD_IS_APU)) {
1227 r = amdgpu_device_resize_fb_bar(adev);
1231 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1232 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1234 #ifdef CONFIG_X86_64
1235 if (adev->flags & AMD_IS_APU) {
1236 adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1237 adev->gmc.aper_size = adev->gmc.real_vram_size;
1240 /* In case the PCI BAR is larger than the actual amount of vram */
1241 adev->gmc.visible_vram_size = adev->gmc.aper_size;
1242 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
1243 adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
1245 /* set the gart size */
1246 if (amdgpu_gart_size == -1) {
1247 switch (adev->asic_type) {
1248 case CHIP_VEGA10: /* all engines support GPUVM */
1249 case CHIP_VEGA12: /* all engines support GPUVM */
1253 adev->gmc.gart_size = 512ULL << 20;
1255 case CHIP_RAVEN: /* DCE SG support */
1257 adev->gmc.gart_size = 1024ULL << 20;
1261 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1264 gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1269 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1273 if (adev->gart.bo) {
1274 WARN(1, "VEGA10 PCIE GART already initialized\n");
1277 /* Initialize common gart structure */
1278 r = amdgpu_gart_init(adev);
1281 adev->gart.table_size = adev->gart.num_gpu_pages * 8;
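/* Default GART PTE flags: uncached (MTYPE_UC) and executable. */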
1282 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1283 AMDGPU_PTE_EXECUTABLE;
1284 return amdgpu_gart_table_vram_alloc(adev);
1288 * gmc_v9_0_save_registers - saves regs
1290 * @adev: amdgpu_device pointer
1292 * This saves potential register values that should be
1293 * restored upon resume
1295 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1297 if (adev->asic_type == CHIP_RAVEN)
1298 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1301 static int gmc_v9_0_sw_init(void *handle)
1303 int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
1304 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1306 adev->gfxhub.funcs->init(adev);
1308 adev->mmhub.funcs->init(adev);
1310 spin_lock_init(&adev->gmc.invalidate_lock);
1312 r = amdgpu_atomfirmware_get_vram_info(adev,
1313 &vram_width, &vram_type, &vram_vendor);
1314 if (amdgpu_sriov_vf(adev))
1315 * For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
1316 * and the DF-related registers are not readable; hardcoding seems to be the
1317 * only way to set the correct vram_width
1319 adev->gmc.vram_width = 2048;
1320 else if (amdgpu_emu_mode != 1)
1321 adev->gmc.vram_width = vram_width;
1323 if (!adev->gmc.vram_width) {
1324 int chansize, numchan;
1326 /* hbm memory channel size */
1327 if (adev->flags & AMD_IS_APU)
1332 numchan = adev->df.funcs->get_hbm_channel_number(adev);
1333 adev->gmc.vram_width = numchan * chansize;
1336 adev->gmc.vram_type = vram_type;
1337 adev->gmc.vram_vendor = vram_vendor;
1338 switch (adev->asic_type) {
1340 adev->num_vmhubs = 2;
1342 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1343 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1345 /* vm_size is 128TB + 512GB for legacy 3-level page support */
1346 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1347 adev->gmc.translate_further =
1348 adev->vm_manager.num_level > 1;
1355 adev->num_vmhubs = 2;
1359 * To fulfill 4-level page support, the
1360 * vm size is 256TB (48 bit), the maximum size for Vega10,
1361 * with a block size of 512 (9 bit)
1363 /* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
1364 if (amdgpu_sriov_vf(adev))
1365 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1367 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1370 adev->num_vmhubs = 3;
1372 /* Keep the vm size the same as Vega20 */
1373 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1379 /* This interrupt is the VMC page fault. */
1380 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1381 &adev->gmc.vm_fault);
1385 if (adev->asic_type == CHIP_ARCTURUS) {
1386 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1387 &adev->gmc.vm_fault);
1392 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1393 &adev->gmc.vm_fault);
1398 if (!amdgpu_sriov_vf(adev)) {
1399 /* interrupt sent to DF. */
1400 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1401 &adev->gmc.ecc_irq);
1406 /* Set the internal MC address mask
1407 * This is the max address of the GPU's
1408 * internal address space.
1410 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
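/* The GPU can only DMA to 44-bit system addresses, hence the 44-bit DMA mask and the swiotlb check below. */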
1412 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1414 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1417 adev->need_swiotlb = drm_need_swiotlb(44);
1419 if (adev->gmc.xgmi.supported) {
1420 r = adev->gfxhub.funcs->get_xgmi_info(adev);
1425 r = gmc_v9_0_mc_init(adev);
1429 amdgpu_gmc_get_vbios_allocations(adev);
1431 /* Memory manager */
1432 r = amdgpu_bo_init(adev);
1436 r = gmc_v9_0_gart_init(adev);
1442 * VMID 0 is reserved for System
1443 * amdgpu graphics/compute will use VMIDs 1..n-1
1444 * amdkfd will use VMIDs n..15
1446 * The first KFD VMID is 8 for GPUs with graphics, 3 for
1447 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
1448 * for video processing.
1450 adev->vm_manager.first_kfd_vmid =
1451 adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
1453 amdgpu_vm_manager_init(adev);
1455 gmc_v9_0_save_registers(adev);
1460 static int gmc_v9_0_sw_fini(void *handle)
1462 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1464 amdgpu_gmc_ras_fini(adev);
1465 amdgpu_gem_force_release(adev);
1466 amdgpu_vm_manager_fini(adev);
1467 amdgpu_gart_table_vram_free(adev);
1468 amdgpu_bo_fini(adev);
1469 amdgpu_gart_fini(adev);
1474 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1477 switch (adev->asic_type) {
1479 if (amdgpu_sriov_vf(adev))
1483 soc15_program_register_sequence(adev,
1484 golden_settings_mmhub_1_0_0,
1485 ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1486 soc15_program_register_sequence(adev,
1487 golden_settings_athub_1_0_0,
1488 ARRAY_SIZE(golden_settings_athub_1_0_0));
1493 /* TODO for renoir */
1494 soc15_program_register_sequence(adev,
1495 golden_settings_athub_1_0_0,
1496 ARRAY_SIZE(golden_settings_athub_1_0_0));
1504 * gmc_v9_0_restore_registers - restores regs
1506 * @adev: amdgpu_device pointer
1508 * This restores register values, saved at suspend.
1510 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1512 if (adev->asic_type == CHIP_RAVEN) {
1513 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
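/* Read the register back to make sure the restore actually took effect. */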
1514 WARN_ON(adev->gmc.sdpif_register !=
1515 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
1520 * gmc_v9_0_gart_enable - gart enable
1522 * @adev: amdgpu_device pointer
1524 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1528 if (adev->gart.bo == NULL) {
1529 dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1532 r = amdgpu_gart_table_vram_pin(adev);
1536 r = adev->gfxhub.funcs->gart_enable(adev);
1540 r = adev->mmhub.funcs->gart_enable(adev);
1544 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1545 (unsigned)(adev->gmc.gart_size >> 20),
1546 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1547 adev->gart.ready = true;
1551 static int gmc_v9_0_hw_init(void *handle)
1553 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557 /* The sequence of these two function calls matters. */
1558 gmc_v9_0_init_golden_registers(adev);
1560 if (adev->mode_info.num_crtc) {
1561 /* Lock out access through the VGA aperture */
1562 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1563 /* disable VGA render */
1564 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1567 if (adev->mmhub.funcs->update_power_gating)
1568 adev->mmhub.funcs->update_power_gating(adev, true);
1570 adev->hdp.funcs->init_registers(adev);
1572 /* After HDP is initialized, flush HDP. */
1573 adev->hdp.funcs->flush_hdp(adev, NULL);
1575 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1580 if (!amdgpu_sriov_vf(adev)) {
1581 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
1582 adev->mmhub.funcs->set_fault_enable_default(adev, value);
1584 for (i = 0; i < adev->num_vmhubs; ++i)
1585 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
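/* VMID 0 is the kernel/GART context (see the comment above gmc_v9_0_flush_gpu_tlb); flush it on every vmhub. */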
1587 if (adev->umc.funcs && adev->umc.funcs->init_registers)
1588 adev->umc.funcs->init_registers(adev);
1590 r = gmc_v9_0_gart_enable(adev);
1596 * gmc_v9_0_gart_disable - gart disable
1598 * @adev: amdgpu_device pointer
1600 * This disables all VM page tables.
1602 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1604 adev->gfxhub.funcs->gart_disable(adev);
1605 adev->mmhub.funcs->gart_disable(adev);
1606 amdgpu_gart_table_vram_unpin(adev);
1609 static int gmc_v9_0_hw_fini(void *handle)
1611 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613 if (amdgpu_sriov_vf(adev)) {
1614 /* full access mode, so don't touch any GMC register */
1615 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
1619 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1620 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1621 gmc_v9_0_gart_disable(adev);
1626 static int gmc_v9_0_suspend(void *handle)
1628 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1630 return gmc_v9_0_hw_fini(adev);
1633 static int gmc_v9_0_resume(void *handle)
1636 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1638 r = gmc_v9_0_hw_init(adev);
1642 amdgpu_vmid_reset_all(adev);
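/* VMID assignments do not survive suspend; reset them so every VM re-acquires a VMID on its next submission. */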
1647 static bool gmc_v9_0_is_idle(void *handle)
1649 /* MC is always ready in GMC v9. */
1653 static int gmc_v9_0_wait_for_idle(void *handle)
1655 /* There is no need to wait for MC idle in GMC v9. */
1659 static int gmc_v9_0_soft_reset(void *handle)
1661 /* XXX for emulation. */
1665 static int gmc_v9_0_set_clockgating_state(void *handle,
1666 enum amd_clockgating_state state)
1668 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1670 adev->mmhub.funcs->set_clockgating(adev, state);
1672 athub_v1_0_set_clockgating(adev, state);
1677 static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
1679 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1681 adev->mmhub.funcs->get_clockgating(adev, flags);
1683 athub_v1_0_get_clockgating(adev, flags);
1686 static int gmc_v9_0_set_powergating_state(void *handle,
1687 enum amd_powergating_state state)
1692 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1694 .early_init = gmc_v9_0_early_init,
1695 .late_init = gmc_v9_0_late_init,
1696 .sw_init = gmc_v9_0_sw_init,
1697 .sw_fini = gmc_v9_0_sw_fini,
1698 .hw_init = gmc_v9_0_hw_init,
1699 .hw_fini = gmc_v9_0_hw_fini,
1700 .suspend = gmc_v9_0_suspend,
1701 .resume = gmc_v9_0_resume,
1702 .is_idle = gmc_v9_0_is_idle,
1703 .wait_for_idle = gmc_v9_0_wait_for_idle,
1704 .soft_reset = gmc_v9_0_soft_reset,
1705 .set_clockgating_state = gmc_v9_0_set_clockgating_state,
1706 .set_powergating_state = gmc_v9_0_set_powergating_state,
1707 .get_clockgating_state = gmc_v9_0_get_clockgating_state,
1710 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1712 .type = AMD_IP_BLOCK_TYPE_GMC,
1716 .funcs = &gmc_v9_0_ip_funcs,