/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "mmhub_v1_8.h"

#include "umc_v12_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2

#define MAX_MEM_RANGES 8
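
/* Human-readable names for the GFXHUB UTCL2 fault client IDs, indexed by the
 * CID field of VM_L2_PROTECTION_FAULT_STATUS.
 */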
static const char * const gfxhub_client_ids[] = {

static const char *mmhub_client_ids_raven[][2] = {

static const char *mmhub_client_ids_renoir[][2] = {

static const char *mmhub_client_ids_vega10[][2] = {
        [32+14][0] = "SDMA0",
        [32+4][1] = "DCEDWB",
        [32+14][1] = "SDMA1",

static const char *mmhub_client_ids_vega12[][2] = {
        [32+15][0] = "SDMA0",
        [32+1][1] = "DCEDWB",
        [32+15][1] = "SDMA1",

static const char *mmhub_client_ids_vega20[][2] = {
        [32+12][0] = "UTCL2",
        [32+14][0] = "SDMA1",
        [32+14][1] = "SDMA1",

static const char *mmhub_client_ids_arcturus[][2] = {

static const char *mmhub_client_ids_aldebaran[][2] = {
        [32+1][0] = "DBGU_IO0",
        [32+2][0] = "DBGU_IO2",
        [96+11][0] = "JPEG0",
        [96+13][0] = "VCNU0",
        [128+11][0] = "JPEG1",
        [128+12][0] = "VCN1",
        [128+13][0] = "VCNU1",
        [256+0][0] = "SDMA0",
        [256+1][0] = "SDMA1",
        [256+2][0] = "SDMA2",
        [256+3][0] = "SDMA3",
        [256+4][0] = "SDMA4",
        [32+1][1] = "DBGU_IO0",
        [32+2][1] = "DBGU_IO2",
        [96+11][1] = "JPEG0",
        [96+13][1] = "VCNU0",
        [128+11][1] = "JPEG1",
        [128+12][1] = "VCN1",
        [128+13][1] = "VCNU1",
        [256+0][1] = "SDMA0",
        [256+1][1] = "SDMA1",
        [256+2][1] = "SDMA2",
        [256+3][1] = "SDMA3",
        [256+4][1] = "SDMA4",

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
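
/* MCA MCUMC_CTRL register offsets used when toggling UMC ECC error reporting:
 * eight UMC instances with four per-channel offsets each.
 */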
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
        (0x000143c0 + 0x00000000),
        (0x000143c0 + 0x00000800),
        (0x000143c0 + 0x00001000),
        (0x000143c0 + 0x00001800),
        (0x000543c0 + 0x00000000),
        (0x000543c0 + 0x00000800),
        (0x000543c0 + 0x00001000),
        (0x000543c0 + 0x00001800),
        (0x000943c0 + 0x00000000),
        (0x000943c0 + 0x00000800),
        (0x000943c0 + 0x00001000),
        (0x000943c0 + 0x00001800),
        (0x000d43c0 + 0x00000000),
        (0x000d43c0 + 0x00000800),
        (0x000d43c0 + 0x00001000),
        (0x000d43c0 + 0x00001800),
        (0x001143c0 + 0x00000000),
        (0x001143c0 + 0x00000800),
        (0x001143c0 + 0x00001000),
        (0x001143c0 + 0x00001800),
        (0x001543c0 + 0x00000000),
        (0x001543c0 + 0x00000800),
        (0x001543c0 + 0x00001000),
        (0x001543c0 + 0x00001800),
        (0x001943c0 + 0x00000000),
        (0x001943c0 + 0x00000800),
        (0x001943c0 + 0x00001000),
        (0x001943c0 + 0x00001800),
        (0x001d43c0 + 0x00000000),
        (0x001d43c0 + 0x00000800),
        (0x001d43c0 + 0x00001000),
        (0x001d43c0 + 0x00001800),
};
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
        (0x000143e0 + 0x00000000),
        (0x000143e0 + 0x00000800),
        (0x000143e0 + 0x00001000),
        (0x000143e0 + 0x00001800),
        (0x000543e0 + 0x00000000),
        (0x000543e0 + 0x00000800),
        (0x000543e0 + 0x00001000),
        (0x000543e0 + 0x00001800),
        (0x000943e0 + 0x00000000),
        (0x000943e0 + 0x00000800),
        (0x000943e0 + 0x00001000),
        (0x000943e0 + 0x00001800),
        (0x000d43e0 + 0x00000000),
        (0x000d43e0 + 0x00000800),
        (0x000d43e0 + 0x00001000),
        (0x000d43e0 + 0x00001800),
        (0x001143e0 + 0x00000000),
        (0x001143e0 + 0x00000800),
        (0x001143e0 + 0x00001000),
        (0x001143e0 + 0x00001800),
        (0x001543e0 + 0x00000000),
        (0x001543e0 + 0x00000800),
        (0x001543e0 + 0x00001000),
        (0x001543e0 + 0x00001800),
        (0x001943e0 + 0x00000000),
        (0x001943e0 + 0x00000800),
        (0x001943e0 + 0x00001000),
        (0x001943e0 + 0x00001800),
        (0x001d43e0 + 0x00000000),
        (0x001d43e0 + 0x00000800),
        (0x001d43e0 + 0x00001000),
        (0x001d43e0 + 0x00001800),
};
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *src,
                                        enum amdgpu_interrupt_state state)
{
        u32 bits, i, tmp, reg;

        /* Devices newer than VEGA10/12 shall have these programming
         * sequences performed by PSP BL
         */
        if (adev->asic_type >= CHIP_VEGA20)
                return 0;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];

                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];

                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];

                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i, j;

        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
               VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;

                                /* This works because this interrupt is only
                                 * enabled at init/resume and disabled in
                                 * fini/suspend, so the overall state doesn't
                                 * change over the course of suspend/resume.
                                 */
                                if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
                                        continue;

                                if (j >= AMDGPU_MMHUB0(0))
                                        tmp = RREG32_SOC15_IP(MMHUB, reg);
                                else
                                        tmp = RREG32_SOC15_IP(GC, reg);

                                if (j >= AMDGPU_MMHUB0(0))
                                        WREG32_SOC15_IP(MMHUB, reg, tmp);
                                else
                                        WREG32_SOC15_IP(GC, reg, tmp);

                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;

                                /* This works because this interrupt is only
                                 * enabled at init/resume and disabled in
                                 * fini/suspend, so the overall state doesn't
                                 * change over the course of suspend/resume.
                                 */
                                if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
                                        continue;

                                if (j >= AMDGPU_MMHUB0(0))
                                        tmp = RREG32_SOC15_IP(MMHUB, reg);
                                else
                                        tmp = RREG32_SOC15_IP(GC, reg);

                                if (j >= AMDGPU_MMHUB0(0))
                                        WREG32_SOC15_IP(MMHUB, reg, tmp);
                                else
                                        WREG32_SOC15_IP(GC, reg, tmp);
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        bool write_fault = !!(entry->src_data[1] & 0x20);
        uint32_t status = 0, cid = 0, rw = 0;
        struct amdgpu_task_info task_info;
        struct amdgpu_vmhub *hub;
        const char *mmhub_cid;
        const char *hub_name;
        u64 addr;
        uint32_t cam_index = 0;
        int ret, xcc_id = 0;
        uint32_t node_id;

        node_id = entry->node_id;
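
        /* Reassemble the 48-bit faulting address: src_data[0] carries page
         * address bits 43:12 and the low nibble of src_data[1] carries bits
         * 47:44.
         */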
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
                vmhub = AMDGPU_MMHUB0(node_id / 4);
        } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
                vmhub = AMDGPU_MMHUB1(0);
        } else {
                hub_name = "gfxhub0";
                if (adev->gfx.funcs->ih_node_to_logical_xcc) {
                        xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
                                        node_id);
                        if (xcc_id < 0)
                                xcc_id = 0;
                }
        }
        hub = &adev->vmhub[vmhub];

        if (adev->irq.retry_cam_enabled) {
                /* Delegate it to a different ring if the hardware hasn't
                 * already processed it.
                 */
                if (entry->ih == &adev->irq.ih) {
                        amdgpu_irq_delegate(adev, entry, 8);
                        return 1;
                }

                cam_index = entry->src_data[2] & 0x3ff;

                ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
                                             addr, write_fault);
                WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
                if (ret)
                        return 1;
        } else {
                /* Process it only if it's the first fault for this address */
                if (entry->ih != &adev->irq.ih_soft &&
                    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
                                             entry->timestamp))
                        return 1;

                /* Delegate it to a different ring if the hardware hasn't
                 * already processed it.
                 */
                if (entry->ih == &adev->irq.ih) {
                        amdgpu_irq_delegate(adev, entry, 8);
                        return 1;
                }

                /* Try to handle the recoverable page faults by filling page
                 * tables
                 */
                if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
                                           addr, write_fault))
                        return 1;
        }

        if (!printk_ratelimit())
                return 0;

        memset(&task_info, 0, sizeof(struct amdgpu_task_info));
        amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

        dev_err(adev->dev,
                "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
                hub_name, retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid,
                entry->pasid, task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);
        dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
                addr, entry->client_id,
                soc15_ih_clientid_name[entry->client_id]);

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
                        node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
                        node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");

        if (amdgpu_sriov_vf(adev))
                return 0;

        /*
         * Issue a dummy read to wait for the status register to
         * be updated to avoid reading an incorrect value due to
         * the new fast GRBM interface.
         */
        if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
            (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
                RREG32(hub->vm_l2_pro_fault_status);

        status = RREG32(hub->vm_l2_pro_fault_status);
        cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
        rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
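        /* Writing bit 0 of VM_L2_PROTECTION_FAULT_CNTL clears the latched
         * fault status so the next fault can be captured.
         */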
        WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

        amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

        dev_err(adev->dev,
                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                status);
        if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
                        gfxhub_client_ids[cid],
                        cid);
        } else {
                switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
                case IP_VERSION(9, 0, 0):
                        mmhub_cid = mmhub_client_ids_vega10[cid][rw];
                        break;
                case IP_VERSION(9, 3, 0):
                        mmhub_cid = mmhub_client_ids_vega12[cid][rw];
                        break;
                case IP_VERSION(9, 4, 0):
                        mmhub_cid = mmhub_client_ids_vega20[cid][rw];
                        break;
                case IP_VERSION(9, 4, 1):
                        mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
                        break;
                case IP_VERSION(9, 1, 0):
                case IP_VERSION(9, 2, 0):
                        mmhub_cid = mmhub_client_ids_raven[cid][rw];
                        break;
                case IP_VERSION(1, 5, 0):
                case IP_VERSION(2, 4, 0):
                        mmhub_cid = mmhub_client_ids_renoir[cid][rw];
                        break;
                case IP_VERSION(1, 8, 0):
                case IP_VERSION(9, 4, 2):
                        mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
                        break;
                default:
                        mmhub_cid = NULL;
                        break;
                }
                dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
                        mmhub_cid ? mmhub_cid : "unknown", cid);
        }
        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
                REG_GET_FIELD(status,
                VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
        dev_err(adev->dev, "\t RW: 0x%x\n", rw);

        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev) &&
            !adev->gmc.xgmi.connected_to_cpu &&
            !adev->gmc.is_app_apu) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
                                            uint32_t flush_type)
{
        u32 req = 0;

        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                              uint32_t vmhub)
{
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                return false;

        return ((vmhub == AMDGPU_MMHUB0(0) ||
                 vmhub == AMDGPU_MMHUB1(0)) &&
                (!amdgpu_sriov_vf(adev)) &&
                (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
                   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                                     uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
                       + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                   uint32_t vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        u32 j, inv_req, tmp, sem, req, ack, inst;
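        /* Engine 17 is a dedicated invalidation engine reserved for these
         * synchronous, driver-initiated flushes (ring-based flushes use
         * ring->vm_inv_eng instead).
         */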
        const unsigned int eng = 17;
        struct amdgpu_vmhub *hub;

        BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

        hub = &adev->vmhub[vmhub];
        inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
        sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
        req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
        ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
        if (vmhub >= AMDGPU_MMHUB0(0))
                inst = GET_INST(GC, 0);
        else
                inst = vmhub;

        if (adev->gfx.kiq[inst].ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid, inst);
                return;
        }

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to resume.
         */

        /* TODO: semaphore usage for GFXHUB still needs debugging before it can be enabled. */
        if (use_semaphore) {
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquire */
                        if (vmhub >= AMDGPU_MMHUB0(0))
                                tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst);
                        else
                                tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (j >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        if (vmhub >= AMDGPU_MMHUB0(0))
                WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst);
        else
                WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst);

        /*
         * Issue a dummy read to wait for the ACK register to
         * be cleared to avoid a false ACK due to the new fast
         * GRBM interface.
         */
        if ((vmhub == AMDGPU_GFXHUB(0)) &&
            (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
                RREG32_NO_KIQ(req);

        for (j = 0; j < adev->usec_timeout; j++) {
                if (vmhub >= AMDGPU_MMHUB0(0))
                        tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst);
                else
                        tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst);
                if (tmp & (1 << vmid))
                        break;
                udelay(1);
        }

        /* TODO: semaphore usage for GFXHUB still needs debugging before it can be enabled. */
        if (use_semaphore) {
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                if (vmhub >= AMDGPU_MMHUB0(0))
                        WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst);
                else
                        WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst);
        }

        spin_unlock(&adev->gmc.invalidate_lock);

        if (j < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                         uint16_t pasid, uint32_t flush_type,
                                         bool all_hub, uint32_t inst)
{
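        /* VMID 0 is reserved for the kernel; scan the user VMIDs (1..15)
         * for a mapping that matches this PASID.
         */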
        for (vmid = 1; vmid < 16; vmid++) {
                bool valid;

                valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
                                                                 &queried);
                if (!valid || queried != pasid)
                        continue;

                if (all_hub) {
                        for_each_set_bit(i, adev->vmhubs_mask,
                                         AMDGPU_MAX_VMHUBS)
                                gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
                                                       flush_type);
                } else {
                        gmc_v9_0_flush_gpu_tlb(adev, vmid,
                                               AMDGPU_GFXHUB(0),
                                               flush_type);
                }
        }
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned int vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned int eng = ring->vm_inv_eng;

        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to resume.
         */

        /* TODO: semaphore usage for GFXHUB still needs debugging before it can be enabled. */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquire */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        /* TODO: semaphore usage for GFXHUB still needs debugging before it can be enabled. */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
                                        unsigned int pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* Do nothing because there's no lut register for mmhub1. */
        if (ring->vm_hub == AMDGPU_MMHUB1(0))
                return;

        if (ring->vm_hub == AMDGPU_GFXHUB(0))
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 47:12 4k physical page base address
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_RW:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        }
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
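        /* A PDE/PTE address must be 64-byte aligned and fit within the
         * 48-bit MC address space.
         */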
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE) {
                        *flags &= ~AMDGPU_PDE_PTE;
                        if (!(*flags & AMDGPU_PTE_VALID))
                                *addr |= 1 << PAGE_SHIFT;
                } else {
                        *flags |= AMDGPU_PTE_TF;
                }
        }
}

static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
                                         struct amdgpu_bo *bo,
                                         struct amdgpu_bo_va_mapping *mapping,
                                         uint64_t *flags)
{
        struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
        bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
        bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
        bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
        struct amdgpu_vm *vm = mapping->bo_va->base.vm;
        unsigned int mtype_local, mtype;
        bool snoop = false;
        bool is_local;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 4, 2):
                if (bo_adev == adev) {

                        /* FIXME: is this still needed? Or does
                         * amdgpu_ttm_tt_pde_flags already handle this?
                         */
                        if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
                             IP_VERSION(9, 4, 2) ||
                             amdgpu_ip_version(adev, GC_HWIP, 0) ==
                             IP_VERSION(9, 4, 3)) &&
                            adev->gmc.xgmi.connected_to_cpu)
                                snoop = true;
                } else {
                        if (uncached || coherent)
                                mtype = MTYPE_UC;
                        else
                                mtype = MTYPE_NC;
                        if (mapping->bo_va->is_xgmi)
                                snoop = true;
                }

                if (uncached || coherent)
                        mtype = MTYPE_UC;
                else
                        mtype = MTYPE_NC;

                /* FIXME: is this still needed? Or does
                 * amdgpu_ttm_tt_pde_flags already handle this?
                 */

        case IP_VERSION(9, 4, 3):
                /* Only local VRAM BOs or system memory on non-NUMA APUs
                 * can be assumed to be local in their entirety. Choose
                 * MTYPE_NC as safe fallback for all system memory BOs on
                 * NUMA systems. Their MTYPE can be overridden per-page in
                 * gmc_v9_0_override_vm_pte_flags.
                 */
                mtype_local = MTYPE_RW;
                if (amdgpu_mtype_local == 1) {
                        DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
                        mtype_local = MTYPE_NC;
                } else if (amdgpu_mtype_local == 2) {
                        DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
                        mtype_local = MTYPE_CC;
                } else {
                        DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
                }

                is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
                            num_possible_nodes() <= 1) ||
                           (is_vram && adev == bo_adev &&
                            KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
                snoop = true;
                if (uncached) {
                        mtype = MTYPE_UC;
                } else if (ext_coherent) {
                        mtype = is_local ? MTYPE_CC : MTYPE_UC;
                } else if (adev->flags & AMD_IS_APU) {
                        mtype = is_local ? mtype_local : MTYPE_NC;
                } else {
                        if (is_local)
                                mtype = mtype_local;
                }

                if (uncached || coherent)
                        mtype = MTYPE_UC;
                else
                        mtype = MTYPE_NC;

                /* FIXME: is this still needed? Or does
                 * amdgpu_ttm_tt_pde_flags already handle this?
                 */
                if (!is_vram)
                        snoop = true;
        }

        if (mtype != MTYPE_NC)
                *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
                         AMDGPU_PTE_MTYPE_VG10(mtype);
        *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        struct amdgpu_bo *bo = mapping->bo_va->base.bo;

        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
        *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (bo && bo->tbo.resource)
                gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
                                             mapping, flags);
}

static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
                                           struct amdgpu_vm *vm,
                                           uint64_t addr, uint64_t *flags)
{
        int local_node, nid;

        /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
         * memory can use more efficient MTYPEs.
         */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
                return;

        /* Only direct-mapped memory allows us to determine the NUMA node from
         * the DRAM address.
         */
        if (!adev->ram_is_direct_mapped) {
                dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
                return;
        }

        /* MTYPE_NC is the same default and can be overridden.
         * MTYPE_UC will be present if the memory is extended-coherent
         * and can also be overridden.
         */
        if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
            AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
            (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
            AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
                dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
                return;
        }

        /* FIXME: Only supported on native mode for now. For carve-out, the
         * NUMA affinity of the GPU/VM needs to come from the PCI info because
         * memory partitions are not associated with different NUMA nodes.
         */
        if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
                local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
        } else {
                dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
                return;
        }

        /* Only handle real RAM. Mappings of PCIe resources don't have struct
         * page or NUMA nodes.
         */
        if (!page_is_ram(addr >> PAGE_SHIFT)) {
                dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
                return;
        }

        nid = pfn_to_nid(addr >> PAGE_SHIFT);
        dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
                            vm->mem_id, local_node, nid);
        if (nid == local_node) {
                uint64_t old_flags = *flags;

                if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
                    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
                        unsigned int mtype_local = MTYPE_RW;

                        if (amdgpu_mtype_local == 1)
                                mtype_local = MTYPE_NC;
                        else if (amdgpu_mtype_local == 2)
                                mtype_local = MTYPE_CC;

                        *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
                                 AMDGPU_PTE_MTYPE_VG10(mtype_local);
                } else if (adev->rev_id) {
                        *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
                                 AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
                }

                dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
                                    old_flags, *flags);
        }
}

static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        unsigned int size;

        /* TODO move to DC so GMC doesn't need to hard-code DCN registers */

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport;

                switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                case IP_VERSION(1, 0, 0):
                case IP_VERSION(1, 0, 1):
                        viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                        size = (REG_GET_FIELD(viewport,
                                HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport,
                                HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                                4);
                        break;
                case IP_VERSION(2, 1, 0):
                        viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
                        size = (REG_GET_FIELD(viewport,
                                HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport,
                                HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                                4);
                        break;
                default:
                        viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
                        size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                                4);
                        break;
                }
        }

        return size;
}

static enum amdgpu_memory_partition
gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
{
        enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;

        if (adev->nbio.funcs->get_memory_partition_mode)
                mode = adev->nbio.funcs->get_memory_partition_mode(adev,
                                                                   supp_modes);

        return mode;
}

static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                return AMDGPU_NPS1_PARTITION_MODE;

        return gmc_v9_0_get_memory_partition(adev, NULL);
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
        .get_vm_pde = gmc_v9_0_get_vm_pde,
        .get_vm_pte = gmc_v9_0_get_vm_pte,
        .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
        .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
        .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
        case IP_VERSION(6, 0, 0):
                adev->umc.funcs = &umc_v6_0_funcs;
                break;
        case IP_VERSION(6, 1, 1):
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.retire_unit = 1;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.ras = &umc_v6_1_ras;
                break;
        case IP_VERSION(6, 1, 2):
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.retire_unit = 1;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.ras = &umc_v6_1_ras;
                break;
        case IP_VERSION(6, 7, 0):
                adev->umc.max_ras_err_cnt_per_query =
                        UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
                adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
                adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
                if (!adev->gmc.xgmi.connected_to_cpu)
                        adev->umc.ras = &umc_v6_7_ras;
                if (1 & adev->smuio.funcs->get_die_id(adev))
                        adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
                else
                        adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
                break;
        case IP_VERSION(12, 0, 0):
                adev->umc.max_ras_err_cnt_per_query =
                        UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
                adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
                adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
                adev->umc.active_mask = adev->aid_mask;
                adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
                adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
                if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
                        adev->umc.ras = &umc_v12_0_ras;
                break;
        default:
                break;
        }
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
        case IP_VERSION(9, 4, 1):
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
                break;
        case IP_VERSION(9, 4, 2):
                adev->mmhub.funcs = &mmhub_v1_7_funcs;
                break;
        case IP_VERSION(1, 8, 0):
                adev->mmhub.funcs = &mmhub_v1_8_funcs;
                break;
        default:
                adev->mmhub.funcs = &mmhub_v1_0_funcs;
                break;
        }
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
        case IP_VERSION(9, 4, 0):
                adev->mmhub.ras = &mmhub_v1_0_ras;
                break;
        case IP_VERSION(9, 4, 1):
                adev->mmhub.ras = &mmhub_v9_4_ras;
                break;
        case IP_VERSION(9, 4, 2):
                adev->mmhub.ras = &mmhub_v1_7_ras;
                break;
        case IP_VERSION(1, 8, 0):
                adev->mmhub.ras = &mmhub_v1_8_ras;
                break;
        default:
                /* mmhub ras is not available */
                break;
        }
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
        else
                adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
        adev->hdp.ras = &hdp_v4_0_ras;
}

static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
        struct amdgpu_mca *mca = &adev->mca;

        /* is UMC the right IP to check for MCA? Maybe DF? */
        switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
        case IP_VERSION(6, 7, 0):
                if (!adev->gmc.xgmi.connected_to_cpu) {
                        mca->mp0.ras = &mca_v3_0_mp0_ras;
                        mca->mp1.ras = &mca_v3_0_mp1_ras;
                        mca->mpio.ras = &mca_v3_0_mpio_ras;
                }
                break;
        default:
                break;
        }
}

static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
        if (!adev->gmc.xgmi.connected_to_cpu)
                adev->gmc.xgmi.ras = &xgmi_ras;
}
static int gmc_v9_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /*
         * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
         * in their IP discovery tables
         */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                adev->gmc.xgmi.supported = true;

        if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
                adev->gmc.xgmi.supported = true;
                adev->gmc.xgmi.connected_to_cpu =
                        adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
        }

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
                enum amdgpu_pkg_type pkg_type =
                        adev->smuio.funcs->get_pkg_type(adev);
                /* On GFXIP 9.4.3 APUs, there is no physical VRAM domain
                 * present, and the APU can be used in two possible modes:
                 * - carveout mode
                 * - native APU mode
                 * "is_app_apu" can be used to identify the APU in the native
                 * mode.
                 */
                adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
                                        !pci_resource_len(adev->pdev, 0));
        }

        gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
        gmc_v9_0_set_mmhub_ras_funcs(adev);
        gmc_v9_0_set_gfxhub_funcs(adev);
        gmc_v9_0_set_hdp_ras_funcs(adev);
        gmc_v9_0_set_mca_ras_funcs(adev);
        gmc_v9_0_set_xgmi_ras_funcs(adev);
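
        /* Shared and private MC apertures: two fixed 4 GiB windows in the
         * canonical address space, used for the HSA shared and private
         * memory apertures.
         */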
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
        adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

        return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;

        /*
         * Work around a performance drop issue when the VBIOS enables
         * partial writes while disabling HBM ECC for Vega10.
         */
        if (!amdgpu_sriov_vf(adev) &&
            (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
                if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs &&
                            adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                }
        }

        if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
                amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
                amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
        }

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
{
        u64 base = adev->mmhub.funcs->get_fb_location(adev);

        amdgpu_gmc_set_agp_default(adev, mc);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        if (adev->gmc.xgmi.connected_to_cpu) {
                amdgpu_gmc_sysvm_location(adev, mc);
        } else {
                amdgpu_gmc_vram_location(adev, mc, base);
                amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
                if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
                        amdgpu_gmc_agp_location(adev, mc);
        }

        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* size in MB on si */
        if (!adev->gmc.is_app_apu) {
                adev->gmc.mc_vram_size =
                        adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        } else {
                DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
                adev->gmc.mc_vram_size = 0;
        }
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU) &&
            !adev->gmc.xgmi.connected_to_cpu) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        /*
         * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
         * interface can use VRAM through here as it appears system reserved
         * memory in host address space.
         *
         * For APUs, VRAM is just the stolen system memory and can be accessed
         * directly.
         *
         * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
         */

        /* check whether both host-gpu and gpu-gpu xgmi links exist */
        if ((!amdgpu_sriov_vf(adev) &&
             (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
            (adev->gmc.xgmi.supported &&
             adev->gmc.xgmi.connected_to_cpu)) {
                adev->gmc.aper_base =
                        adev->gfxhub.funcs->get_mc_fb_offset(adev) +
                        adev->gmc.xgmi.physical_node_id *
                        adev->gmc.xgmi.node_segment_size;
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif

        adev->gmc.visible_vram_size = adev->gmc.aper_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
                case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
                case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
                case IP_VERSION(9, 4, 0):
                case IP_VERSION(9, 4, 1):
                case IP_VERSION(9, 4, 2):
                case IP_VERSION(9, 4, 3):
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                case IP_VERSION(9, 1, 0):   /* DCE SG support */
                case IP_VERSION(9, 2, 2):   /* DCE SG support */
                case IP_VERSION(9, 3, 0):
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }

        if (adev->gmc.xgmi.connected_to_cpu) {
                adev->gmc.vmid0_page_table_depth = 1;
                adev->gmc.vmid0_page_table_block_size = 12;
        } else {
                adev->gmc.vmid0_page_table_depth = 0;
                adev->gmc.vmid0_page_table_block_size = 0;
        }

        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
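
        /* Each GART page table entry is 8 bytes, one per GPU page. */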
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;

        if (!adev->gmc.real_vram_size) {
                dev_info(adev->dev, "Put GART in system memory for APU\n");
                r = amdgpu_gart_table_ram_alloc(adev);
                if (r)
                        dev_err(adev->dev, "Failed to allocate GART in system memory\n");
        } else {
                r = amdgpu_gart_table_vram_alloc(adev);
                if (r)
                        return r;

                if (adev->gmc.xgmi.connected_to_cpu)
                        r = amdgpu_gmc_pdb0_alloc(adev);
        }

        return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
        if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
            (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
                adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
{
        enum amdgpu_memory_partition mode;
        u32 supp_modes;
        bool valid;

        mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);

        /* Mode detected by hardware not present in supported modes */
        if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
            !(BIT(mode - 1) & supp_modes))
                return false;

        switch (mode) {
        case UNKNOWN_MEMORY_PARTITION_MODE:
        case AMDGPU_NPS1_PARTITION_MODE:
                valid = (adev->gmc.num_mem_partitions == 1);
                break;
        case AMDGPU_NPS2_PARTITION_MODE:
                valid = (adev->gmc.num_mem_partitions == 2);
                break;
        case AMDGPU_NPS4_PARTITION_MODE:
                valid = (adev->gmc.num_mem_partitions == 3 ||
                         adev->gmc.num_mem_partitions == 4);
                break;
        default:
                valid = false;
                break;
        }

        return valid;
}

static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
{
        int i;

        /* Check if node with id 'nid' is present in 'node_ids' array */
        for (i = 0; i < num_ids; ++i)
                if (node_ids[i] == nid)
                        return true;

        return false;
}

static void
gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
                              struct amdgpu_mem_partition_info *mem_ranges)
{
        struct amdgpu_numa_info numa_info;
        int node_ids[MAX_MEM_RANGES];
        int num_ranges = 0, ret;
        int num_xcc, xcc_id;
        uint32_t xcc_mask;

        num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        xcc_mask = (1U << num_xcc) - 1;

        for_each_inst(xcc_id, xcc_mask) {
                ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
                if (ret)
                        continue;

                if (numa_info.nid == NUMA_NO_NODE) {
                        mem_ranges[0].size = numa_info.size;
                        mem_ranges[0].numa.node = numa_info.nid;
                        num_ranges = 1;
                        break;
                }

                if (gmc_v9_0_is_node_present(node_ids, num_ranges,
                                             numa_info.nid))
                        continue;

                node_ids[num_ranges] = numa_info.nid;
                mem_ranges[num_ranges].numa.node = numa_info.nid;
                mem_ranges[num_ranges].size = numa_info.size;
                num_ranges++;
        }

        adev->gmc.num_mem_partitions = num_ranges;
}

static void
gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
                            struct amdgpu_mem_partition_info *mem_ranges)
{
        enum amdgpu_memory_partition mode;
        u32 start_addr = 0, size;
        int i;

        mode = gmc_v9_0_query_memory_partition(adev);

        switch (mode) {
        case UNKNOWN_MEMORY_PARTITION_MODE:
        case AMDGPU_NPS1_PARTITION_MODE:
                adev->gmc.num_mem_partitions = 1;
                break;
        case AMDGPU_NPS2_PARTITION_MODE:
                adev->gmc.num_mem_partitions = 2;
                break;
        case AMDGPU_NPS4_PARTITION_MODE:
                if (adev->flags & AMD_IS_APU)
                        adev->gmc.num_mem_partitions = 3;
                else
                        adev->gmc.num_mem_partitions = 4;
                break;
        default:
                adev->gmc.num_mem_partitions = 1;
                break;
        }

        size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
        size /= adev->gmc.num_mem_partitions;
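
        /* Split VRAM evenly across partitions; the last range is adjusted
         * below to absorb any remainder from the integer division.
         */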
        for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
                mem_ranges[i].range.fpfn = start_addr;
                mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
                mem_ranges[i].range.lpfn = start_addr + size - 1;
                start_addr += size;
        }

        /* Adjust the last one */
        mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
                (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
        mem_ranges[adev->gmc.num_mem_partitions - 1].size =
                adev->gmc.real_vram_size -
                ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
                 << AMDGPU_GPU_PAGE_SHIFT);
}

static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
{
        bool valid;

        adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
                                           sizeof(struct amdgpu_mem_partition_info),
                                           GFP_KERNEL);
        if (!adev->gmc.mem_partitions)
                return -ENOMEM;

        /* TODO : Get the range from PSP/Discovery for dGPU */
        if (adev->gmc.is_app_apu)
                gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
        else
                gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);

        if (amdgpu_sriov_vf(adev))
                valid = true;
        else
                valid = gmc_v9_0_validate_partition_info(adev);
        if (!valid) {
                /* TODO: handle invalid case */
                dev_WARN(adev->dev,
                         "Mem ranges not matching with hardware config");
        }

        return 0;
}

static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
        static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
        u32 vram_info;

        if (!amdgpu_sriov_vf(adev)) {
                vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
                adev->gmc.vram_vendor = vram_info & 0xF;
        }
        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
        adev->gmc.vram_width = 128 * 64;
}

static int gmc_v9_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned long inst_mask = adev->aid_mask;

        adev->gfxhub.funcs->init(adev);

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
                gmc_v9_4_3_init_vram_info(adev);
        } else if (!adev->bios) {
                if (adev->flags & AMD_IS_APU) {
                        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
                        adev->gmc.vram_width = 64 * 64;
                } else {
                        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
                        adev->gmc.vram_width = 128 * 64;
                }
        } else {
                r = amdgpu_atomfirmware_get_vram_info(adev,
                                                      &vram_width, &vram_type, &vram_vendor);
                if (amdgpu_sriov_vf(adev))
                        /* For Vega10 SR-IOV, vram_width can't be read from
                         * ATOM as on Raven, and the DF-related registers are
                         * not readable; hardcoding seems to be the only way
                         * to set the correct vram_width.
                         */
                        adev->gmc.vram_width = 2048;
                else if (amdgpu_emu_mode != 1)
                        adev->gmc.vram_width = vram_width;

                if (!adev->gmc.vram_width) {
                        int chansize, numchan;

                        /* hbm memory channel size */
                        if (adev->flags & AMD_IS_APU)
                                chansize = 64;
                        else
                                chansize = 128;
                        if (adev->df.funcs &&
                            adev->df.funcs->get_hbm_channel_number) {
                                numchan = adev->df.funcs->get_hbm_channel_number(adev);
                                adev->gmc.vram_width = numchan * chansize;
                        }
                }

                adev->gmc.vram_type = vram_type;
                adev->gmc.vram_vendor = vram_vendor;
        }
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 2, 2):
                set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
                set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

                if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                } else {
                        /* vm_size is 128TB + 512GB for legacy 3-level page support */
                        amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
                        adev->gmc.translate_further =
                                adev->vm_manager.num_level > 1;
                }
                break;
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 2):
                set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
                set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Vega10,
                 * block size 512 (9bit)
                 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
                        adev->gmc.translate_further = adev->vm_manager.num_level > 1;
                break;
        case IP_VERSION(9, 4, 1):
                set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
                set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
                set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);

                /* Keep the vm size same with Vega20 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                adev->gmc.translate_further = adev->vm_manager.num_level > 1;
                break;
        case IP_VERSION(9, 4, 3):
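                /* GC 9.4.3: one GFXHUB per XCC instance plus one MMHUB per
                 * AID, so the vmhubs mask is built from both.
                 */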
                bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
                           NUM_XCC(adev->gfx.xcc_mask));

                inst_mask <<= AMDGPU_MMHUB0(0);
                bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);

                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                adev->gmc.translate_further = adev->vm_manager.num_level > 1;
                break;
        default:
                break;
        }

        /* This interrupt is VMC page fault.*/
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
                                      &adev->gmc.vm_fault);
                if (r)
                        return r;
        }

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (!amdgpu_sriov_vf(adev) &&
            !adev->gmc.xgmi.connected_to_cpu &&
            !adev->gmc.is_app_apu) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
                if (r)
                        return r;
        }

        /* Set the internal MC address mask
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
                        IP_VERSION(9, 4, 2) ?
                        48 : 44;

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
        if (r) {
                dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);

        r = gmc_v9_0_mc_init(adev);
        if (r)
                return r;

        amdgpu_gmc_get_vbios_allocations(adev);

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
                r = gmc_v9_0_init_mem_ranges(adev);
                if (r)
                        return r;
        }

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v9_0_gart_init(adev);
        if (r)
                return r;

        /*
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1..n-1
         * amdkfd will use VMIDs n..15
         *
         * The first KFD VMID is 8 for GPUs with graphics, 3 for
         * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
         * for video processing.
         */
        adev->vm_manager.first_kfd_vmid =
                (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
                 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
                 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ?
                3 : 8;

        amdgpu_vm_manager_init(adev);

        gmc_v9_0_save_registers(adev);

        r = amdgpu_gmc_ras_sw_init(adev);
        if (r)
                return r;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                amdgpu_gmc_sysfs_init(adev);

        return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
                amdgpu_gmc_sysfs_fini(adev);

        amdgpu_gmc_ras_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        if (!adev->gmc.real_vram_size) {
                dev_info(adev->dev, "Freeing GART placed in system memory for APU\n");
                amdgpu_gart_table_ram_free(adev);
        } else {
                amdgpu_gart_table_vram_free(adev);
        }

        amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
        amdgpu_bo_fini(adev);

        adev->gmc.num_mem_partitions = 0;
        kfree(adev->gmc.mem_partitions);

        return 0;
}
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
        case IP_VERSION(9, 0, 0):
                if (amdgpu_sriov_vf(adev))
                        break;
                fallthrough;
        case IP_VERSION(9, 4, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_mmhub_1_0_0,
                                                ARRAY_SIZE(golden_settings_mmhub_1_0_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 2, 0):
                /* TODO for renoir */
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        default:
                break;
        }
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
        if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
            (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
                WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
                WARN_ON(adev->gmc.sdpif_register !=
                        RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
        }
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
        int r;

        if (adev->gmc.xgmi.connected_to_cpu)
                amdgpu_gmc_init_pdb0(adev);

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }

        amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

        if (!adev->in_s0ix) {
                r = adev->gfxhub.funcs->gart_enable(adev);
                if (r)
                        return r;
        }

        r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;

        DRM_INFO("PCIE GART of %uM enabled.\n",
                 (unsigned int)(adev->gmc.gart_size >> 20));
        if (adev->gmc.pdb0_bo)
                DRM_INFO("PDB0 located at 0x%016llX\n",
                         (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
        DRM_INFO("PTB located at 0x%016llX\n",
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

        return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool value;
        int i, r;

        adev->gmc.flush_pasid_uses_kiq = true;

        /* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
         * (type 2), which flushes both. Due to a race condition with
         * concurrent memory accesses using the same TLB cache line, we still
         * need a second TLB flush after this.
         */
        adev->gmc.flush_tlb_needs_extra_type_2 =
                amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
                adev->gmc.xgmi.num_physical_nodes;
        /*
         * TODO: This workaround is badly documented and had a buggy
         * implementation. We should probably verify what we do here.
         */
        adev->gmc.flush_tlb_needs_extra_type_0 =
                amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
                adev->rev_id == 0;

        /* The sequence of these two function calls matters.*/
        gmc_v9_0_init_golden_registers(adev);

        if (adev->mode_info.num_crtc) {
                /* Lockout access through VGA aperture*/
                WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
                /* disable VGA render */
                WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        }

        if (adev->mmhub.funcs->update_power_gating)
                adev->mmhub.funcs->update_power_gating(adev, true);

        adev->hdp.funcs->init_registers(adev);

        /* After HDP is initialized, flush HDP.*/
        adev->hdp.funcs->flush_hdp(adev, NULL);

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
        else
                value = true;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gfxhub.funcs->set_fault_enable_default(adev, value);
                adev->mmhub.funcs->set_fault_enable_default(adev, value);
        }
        for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
                if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
                        continue;
                gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
        }

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        r = gmc_v9_0_gart_enable(adev);
        if (r)
                return r;

        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);
        else
                return 0;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_gart_disable(adev);

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        /*
         * Pair the operations done in gmc_v9_0_hw_init and thus maintain
         * a correct cached state for GMC. Otherwise, the "gate" again
         * operation on S3 resuming will fail due to wrong cached state.
         */
        if (adev->mmhub.funcs->update_power_gating)
                adev->mmhub.funcs->update_power_gating(adev, false);

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

        return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v9_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v9.*/
        return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v9.*/
        return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
        /* XXX for emulation.*/
        return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->set_clockgating(adev, state);

        athub_v1_0_set_clockgating(adev, state);

        return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->get_clockgating(adev, flags);

        athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .name = "gmc_v9_0",
        .early_init = gmc_v9_0_early_init,
        .late_init = gmc_v9_0_late_init,
        .sw_init = gmc_v9_0_sw_init,
        .sw_fini = gmc_v9_0_sw_fini,
        .hw_init = gmc_v9_0_hw_init,
        .hw_fini = gmc_v9_0_hw_fini,
        .suspend = gmc_v9_0_suspend,
        .resume = gmc_v9_0_resume,
        .is_idle = gmc_v9_0_is_idle,
        .wait_for_idle = gmc_v9_0_wait_for_idle,
        .soft_reset = gmc_v9_0_soft_reset,
        .set_clockgating_state = gmc_v9_0_set_clockgating_state,
        .set_powergating_state = gmc_v9_0_set_powergating_state,
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v9_0_ip_funcs,
};