2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/kthread.h>
29 #include <linux/console.h>
30 #include <linux/slab.h>
31 #include <linux/debugfs.h>
33 #include <drm/drm_crtc_helper.h>
34 #include <drm/amdgpu_drm.h>
35 #include <linux/vgaarb.h>
36 #include <linux/vga_switcheroo.h>
37 #include <linux/efi.h>
39 #include "amdgpu_trace.h"
40 #include "amdgpu_i2c.h"
42 #include "amdgpu_atombios.h"
43 #include "amdgpu_atomfirmware.h"
45 #ifdef CONFIG_DRM_AMDGPU_SI
48 #ifdef CONFIG_DRM_AMDGPU_CIK
53 #include "bif/bif_4_1_d.h"
54 #include <linux/pci.h>
55 #include <linux/firmware.h>
57 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
58 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
60 #define AMDGPU_RESUME_MS 2000
62 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
63 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
64 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
66 static const char *amdgpu_asic_name[] = {
90 bool amdgpu_device_is_px(struct drm_device *dev)
92 struct amdgpu_device *adev = dev->dev_private;
94 if (adev->flags & AMD_IS_PX)
100 * MMIO register access helper functions.
102 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
107 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
108 BUG_ON(in_interrupt());
109 return amdgpu_virt_kiq_rreg(adev, reg);
112 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
113 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
117 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
118 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
119 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
120 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
122 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
126 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
129 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
131 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
132 BUG_ON(in_interrupt());
133 return amdgpu_virt_kiq_wreg(adev, reg, v);
136 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
137 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
141 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
142 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
143 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
144 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
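/*
 * Usage sketch (illustrative only, not part of the original code): a typical
 * read-modify-write of an MMIO register through the helpers above.  The
 * register offset and field mask names are hypothetical placeholders; the
 * driver normally goes through the RREG32()/WREG32() wrappers rather than
 * calling these helpers directly.
 *
 *	u32 tmp;
 *
 *	tmp = amdgpu_mm_rreg(adev, some_reg_offset, 0);
 *	tmp &= ~SOME_FIELD_MASK;
 *	tmp |= SOME_FIELD_VALUE;
 *	amdgpu_mm_wreg(adev, some_reg_offset, tmp, 0);
 */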
148 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
150 if ((reg * 4) < adev->rio_mem_size)
151 return ioread32(adev->rio_mem + (reg * 4));
153 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
154 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
158 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
161 if ((reg * 4) < adev->rio_mem_size)
162 iowrite32(v, adev->rio_mem + (reg * 4));
164 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
165 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
170 * amdgpu_mm_rdoorbell - read a doorbell dword
172 * @adev: amdgpu_device pointer
173 * @index: doorbell index
175 * Returns the value in the doorbell aperture at the
176 * requested doorbell index (CIK).
178 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
180 if (index < adev->doorbell.num_doorbells) {
181 return readl(adev->doorbell.ptr + index);
183 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
189 * amdgpu_mm_wdoorbell - write a doorbell dword
191 * @adev: amdgpu_device pointer
192 * @index: doorbell index
195 * Writes @v to the doorbell aperture at the
196 * requested doorbell index (CIK).
198 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
200 if (index < adev->doorbell.num_doorbells) {
201 writel(v, adev->doorbell.ptr + index);
203 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
208 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
210 * @adev: amdgpu_device pointer
211 * @index: doorbell index
213 * Returns the value in the doorbell aperture at the
214 * requested doorbell index (VEGA10+).
216 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
218 if (index < adev->doorbell.num_doorbells) {
219 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
221 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
227 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
229 * @adev: amdgpu_device pointer
230 * @index: doorbell index
233 * Writes @v to the doorbell aperture at the
234 * requested doorbell index (VEGA10+).
236 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
238 if (index < adev->doorbell.num_doorbells) {
239 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
241 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
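/*
 * Usage sketch (illustrative, not from the original code): a ring that uses a
 * doorbell would typically publish its updated write pointer through its
 * doorbell slot.  The "ring" structure members below are assumptions made for
 * the example.
 *
 *	if (ring->use_doorbell)
 *		amdgpu_mm_wdoorbell(adev, ring->doorbell_index,
 *				    lower_32_bits(ring->wptr));
 */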
246 * amdgpu_invalid_rreg - dummy reg read function
248 * @adev: amdgpu device pointer
249 * @reg: offset of register
251 * Dummy register read function. Used for register blocks
252 * that certain asics don't have (all asics).
253 * Returns the value in the register.
255 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
257 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
263 * amdgpu_invalid_wreg - dummy reg write function
265 * @adev: amdgpu device pointer
266 * @reg: offset of register
267 * @v: value to write to the register
269 * Dummy register write function. Used for register blocks
270 * that certain asics don't have (all asics).
272 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
274 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
280 * amdgpu_block_invalid_rreg - dummy reg read function
282 * @adev: amdgpu device pointer
283 * @block: offset of instance
284 * @reg: offset of register
286 * Dummy register read function. Used for register blocks
287 * that certain asics don't have (all asics).
288 * Returns the value in the register.
290 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
291 uint32_t block, uint32_t reg)
293 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
300 * amdgpu_block_invalid_wreg - dummy reg write function
302 * @adev: amdgpu device pointer
303 * @block: offset of instance
304 * @reg: offset of register
305 * @v: value to write to the register
307 * Dummy register write function. Used for register blocks
308 * that certain asics don't have (all asics).
310 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
312 uint32_t reg, uint32_t v)
314 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
319 static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
323 if (adev->vram_scratch.robj == NULL) {
324 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
325 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
326 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
327 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
328 NULL, NULL, &adev->vram_scratch.robj);
334 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
335 if (unlikely(r != 0))
337 r = amdgpu_bo_pin(adev->vram_scratch.robj,
338 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
340 amdgpu_bo_unreserve(adev->vram_scratch.robj);
343 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
344 (void **)&adev->vram_scratch.ptr);
346 amdgpu_bo_unpin(adev->vram_scratch.robj);
347 amdgpu_bo_unreserve(adev->vram_scratch.robj);
352 static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
356 if (adev->vram_scratch.robj == NULL) {
359 r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
360 if (likely(r == 0)) {
361 amdgpu_bo_kunmap(adev->vram_scratch.robj);
362 amdgpu_bo_unpin(adev->vram_scratch.robj);
363 amdgpu_bo_unreserve(adev->vram_scratch.robj);
365 amdgpu_bo_unref(&adev->vram_scratch.robj);
369 * amdgpu_program_register_sequence - program an array of registers.
371 * @adev: amdgpu_device pointer
372 * @registers: pointer to the register array
373 * @array_size: size of the register array
375 * Programs an array of registers with AND and OR masks.
376 * This is a helper for setting golden registers.
378 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
379 const u32 *registers,
380 const u32 array_size)
382 u32 tmp, reg, and_mask, or_mask;
388 for (i = 0; i < array_size; i += 3) {
389 reg = registers[i + 0];
390 and_mask = registers[i + 1];
391 or_mask = registers[i + 2];
393 if (and_mask == 0xffffffff) {
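/*
 * Illustrative sketch of the expected array layout (register names and values
 * below are made-up placeholders): the sequence is consumed as triplets of
 * {register, and_mask, or_mask}, and an and_mask of 0xffffffff means the
 * or_mask value is written verbatim instead of being merged with the current
 * register contents.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmSOME_REG_A, 0xffffffff, 0x00000100,
 *		mmSOME_REG_B, 0x0000ff00, 0x00003200,
 *	};
 *
 *	amdgpu_program_register_sequence(adev, example_golden_settings,
 *					 ARRAY_SIZE(example_golden_settings));
 */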
404 void amdgpu_pci_config_reset(struct amdgpu_device *adev)
406 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
410 * GPU doorbell aperture helper functions.
413 * amdgpu_doorbell_init - Init doorbell driver information.
415 * @adev: amdgpu_device pointer
417 * Init doorbell driver information (CIK)
418 * Returns 0 on success, error on failure.
420 static int amdgpu_doorbell_init(struct amdgpu_device *adev)
422 /* doorbell bar mapping */
423 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
424 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
426 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
427 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
428 if (adev->doorbell.num_doorbells == 0)
431 adev->doorbell.ptr = ioremap(adev->doorbell.base,
432 adev->doorbell.num_doorbells *
434 if (adev->doorbell.ptr == NULL)
441 * amdgpu_doorbell_fini - Tear down doorbell driver information.
443 * @adev: amdgpu_device pointer
445 * Tear down doorbell driver information (CIK)
447 static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
449 iounmap(adev->doorbell.ptr);
450 adev->doorbell.ptr = NULL;
454 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
457 * @adev: amdgpu_device pointer
458 * @aperture_base: output returning doorbell aperture base physical address
459 * @aperture_size: output returning doorbell aperture size in bytes
460 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
462 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
463 * takes doorbells required for its own rings and reports the setup to amdkfd.
464 * amdgpu reserved doorbells are at the start of the doorbell aperture.
466 void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
467 phys_addr_t *aperture_base,
468 size_t *aperture_size,
469 size_t *start_offset)
472 * The first num_doorbells are used by amdgpu.
473 * amdkfd takes whatever's left in the aperture.
475 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
476 *aperture_base = adev->doorbell.base;
477 *aperture_size = adev->doorbell.size;
478 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
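/*
 * Worked example (illustrative numbers): with an 8 MB doorbell BAR and
 * num_doorbells clamped to 1024 slots, amdkfd would be reported
 * aperture_base = doorbell.base, aperture_size = 8 MB and
 * start_offset = 1024 * sizeof(u32) = 4 KB, i.e. everything past the
 * amdgpu-owned doorbells at the start of the aperture.
 */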
488 * Writeback is the method by which the GPU updates special pages in memory
489 * with the status of certain GPU events (fences, ring pointers, etc.).
493 * amdgpu_wb_fini - Disable Writeback and free memory
495 * @adev: amdgpu_device pointer
497 * Disables Writeback and frees the Writeback memory (all asics).
498 * Used at driver shutdown.
500 static void amdgpu_wb_fini(struct amdgpu_device *adev)
502 if (adev->wb.wb_obj) {
503 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
505 (void **)&adev->wb.wb);
506 adev->wb.wb_obj = NULL;
511 * amdgpu_wb_init - Init Writeback driver info and allocate memory
513 * @adev: amdgpu_device pointer
515 * Initializes writeback and allocates writeback memory (all asics).
516 * Used at driver startup.
517 * Returns 0 on success or an -error on failure.
519 static int amdgpu_wb_init(struct amdgpu_device *adev)
523 if (adev->wb.wb_obj == NULL) {
524 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
525 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
526 &adev->wb.wb_obj, &adev->wb.gpu_addr,
527 (void **)&adev->wb.wb);
529 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
533 adev->wb.num_wb = AMDGPU_MAX_WB;
534 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
536 /* clear wb memory */
537 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
544 * amdgpu_wb_get - Allocate a wb entry
546 * @adev: amdgpu_device pointer
549 * Allocate a wb slot for use by the driver (all asics).
550 * Returns 0 on success or -EINVAL on failure.
552 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
554 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
555 if (offset < adev->wb.num_wb) {
556 __set_bit(offset, adev->wb.used);
565 * amdgpu_wb_get_64bit - Allocate a wb entry
567 * @adev: amdgpu_device pointer
570 * Allocate a wb slot for use by the driver (all asics).
571 * Returns 0 on success or -EINVAL on failure.
573 int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
575 unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
576 adev->wb.num_wb, 0, 2, 7, 0);
577 if ((offset + 1) < adev->wb.num_wb) {
578 __set_bit(offset, adev->wb.used);
579 __set_bit(offset + 1, adev->wb.used);
588 * amdgpu_wb_free - Free a wb entry
590 * @adev: amdgpu_device pointer
593 * Free a wb slot allocated for use by the driver (all asics)
595 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
597 if (wb < adev->wb.num_wb)
598 __clear_bit(wb, adev->wb.used);
602 * amdgpu_wb_free_64bit - Free a wb entry
604 * @adev: amdgpu_device pointer
607 * Free a wb slot allocated for use by the driver (all asics)
609 void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
611 if ((wb + 1) < adev->wb.num_wb) {
612 __clear_bit(wb, adev->wb.used);
613 __clear_bit(wb + 1, adev->wb.used);
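/*
 * Usage sketch (illustrative, not from the original code): a caller that needs
 * a writeback slot, e.g. for a fence or rptr value, would allocate one, derive
 * the GPU and CPU addresses for it, and release it again on teardown.
 *
 *	u32 wb;
 *	u64 gpu_addr;
 *	volatile u32 *cpu_addr;
 *
 *	if (amdgpu_wb_get(adev, &wb))
 *		return -EINVAL;
 *	gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	cpu_addr = &adev->wb.wb[wb];
 *	...
 *	amdgpu_wb_free(adev, wb);
 */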
618 * amdgpu_vram_location - try to find VRAM location
619 * @adev: amdgpu device structure holding all necessary information
620 * @mc: memory controller structure holding memory information
621 * @base: base address at which to put VRAM
623 * Function will try to place VRAM at the base address provided
624 * as parameter (which is so far either the PCI aperture address or,
625 * for IGP, the TOM base address).
627 * If there is not enough space to fit the invisible VRAM in the 32-bit
628 * address space then we limit the VRAM size to the aperture.
630 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
631 * this shouldn't be a problem as we are using the PCI aperture as a reference.
632 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
635 * Note: we use mc_vram_size because on some boards we need to program the MC
636 * to cover the whole aperture even if the VRAM size is smaller than the
637 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
639 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
640 * we are not in the case where real_vram_size is smaller than mc_vram_size
641 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots of Ubuntu
644 * Note: IGP TOM addr should be the same as the aperture addr, we don't
645 * explicitly check for that though.
647 * FIXME: when reducing VRAM size align new size on power of 2.
649 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
651 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
653 mc->vram_start = base;
654 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
655 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
656 mc->real_vram_size = mc->aper_size;
657 mc->mc_vram_size = mc->aper_size;
659 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
660 if (limit && limit < mc->real_vram_size)
661 mc->real_vram_size = limit;
662 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
663 mc->mc_vram_size >> 20, mc->vram_start,
664 mc->vram_end, mc->real_vram_size >> 20);
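/*
 * Worked example (illustrative numbers): for base = 0 and mc_vram_size = 4 GB
 * that fits within adev->mc.mc_mask, this yields vram_start = 0x0 and
 * vram_end = 0xFFFFFFFF.  If mc_vram_size did not fit in the addressable
 * window, both mc_vram_size and real_vram_size would instead be clamped to
 * aper_size, and a non-zero amdgpu_vram_limit further caps real_vram_size.
 */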
668 * amdgpu_gtt_location - try to find GTT location
669 * @adev: amdgpu device structure holding all necessary information
670 * @mc: memory controller structure holding memory information
672 * Function will try to place GTT before or after VRAM.
674 * If GTT size is bigger than the space left then we adjust the GTT size.
675 * Thus this function will never fail.
677 * FIXME: when reducing GTT size align new size on power of 2.
679 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
681 u64 size_af, size_bf;
683 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
684 size_bf = mc->vram_start & ~mc->gtt_base_align;
685 if (size_bf > size_af) {
686 if (mc->gtt_size > size_bf) {
687 dev_warn(adev->dev, "limiting GTT\n");
688 mc->gtt_size = size_bf;
692 if (mc->gtt_size > size_af) {
693 dev_warn(adev->dev, "limiting GTT\n");
694 mc->gtt_size = size_af;
696 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
698 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
699 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
700 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
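/*
 * Worked example (illustrative numbers): with VRAM at 0x0 - 0xFFFFFFFF and a
 * 40-bit mc_mask, the space before VRAM (size_bf) is 0 while the space after
 * it (size_af) is large, so GTT lands right after VRAM at roughly
 * gtt_start = 0x100000000 (aligned by gtt_base_align); gtt_size is only
 * reduced if it does not fit in the chosen gap.
 */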
704 * GPU helper functions.
707 * amdgpu_need_post - check if the hw needs to be posted or not
709 * @adev: amdgpu_device pointer
711 * Check whether the asic has been initialized (all asics) at driver startup
712 * or whether a post is needed after a hw reset.
713 * Returns true if post is needed, false if not.
715 bool amdgpu_need_post(struct amdgpu_device *adev)
719 if (adev->has_hw_reset) {
720 adev->has_hw_reset = false;
723 /* then check MEM_SIZE, in case the crtcs are off */
724 reg = amdgpu_asic_get_config_memsize(adev);
726 if ((reg != 0) && (reg != 0xffffffff))
733 static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
735 if (amdgpu_sriov_vf(adev))
738 if (amdgpu_passthrough(adev)) {
739 /* for FIJI: in the whole-GPU pass-through virtualization case, after a VM reboot
740 * some old SMC firmware still needs the driver to do a vPost, otherwise the GPU hangs;
741 * SMC firmware versions above 22.15 don't have this flaw, so we force
742 * a vPost for SMC versions below 22.15
744 if (adev->asic_type == CHIP_FIJI) {
747 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
748 /* force vPost if an error occurred */
752 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
753 if (fw_ver < 0x00160e00)
757 return amdgpu_need_post(adev);
761 * amdgpu_dummy_page_init - init dummy page used by the driver
763 * @adev: amdgpu_device pointer
765 * Allocate the dummy page used by the driver (all asics).
766 * This dummy page is used by the driver as a filler for gart entries
767 * when pages are taken out of the GART
768 * Returns 0 on success, -ENOMEM on failure.
770 int amdgpu_dummy_page_init(struct amdgpu_device *adev)
772 if (adev->dummy_page.page)
774 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
775 if (adev->dummy_page.page == NULL)
777 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
778 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
779 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
780 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
781 __free_page(adev->dummy_page.page);
782 adev->dummy_page.page = NULL;
789 * amdgpu_dummy_page_fini - free dummy page used by the driver
791 * @adev: amdgpu_device pointer
793 * Frees the dummy page used by the driver (all asics).
795 void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
797 if (adev->dummy_page.page == NULL)
799 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
800 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
801 __free_page(adev->dummy_page.page);
802 adev->dummy_page.page = NULL;
806 /* ATOM accessor methods */
808 * ATOM is an interpreted byte code stored in tables in the vbios. The
809 * driver registers callbacks to access registers and the interpreter
810 * in the driver parses the tables and executes them to program specific
811 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
812 * atombios.h, and atom.c
816 * cail_pll_read - read PLL register
818 * @info: atom card_info pointer
819 * @reg: PLL register offset
821 * Provides a PLL register accessor for the atom interpreter (r4xx+).
822 * Returns the value of the PLL register.
824 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
830 * cail_pll_write - write PLL register
832 * @info: atom card_info pointer
833 * @reg: PLL register offset
834 * @val: value to write to the pll register
836 * Provides a PLL register accessor for the atom interpreter (r4xx+).
838 static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
844 * cail_mc_read - read MC (Memory Controller) register
846 * @info: atom card_info pointer
847 * @reg: MC register offset
849 * Provides an MC register accessor for the atom interpreter (r4xx+).
850 * Returns the value of the MC register.
852 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
858 * cail_mc_write - write MC (Memory Controller) register
860 * @info: atom card_info pointer
861 * @reg: MC register offset
862 * @val: value to write to the MC register
864 * Provides an MC register accessor for the atom interpreter (r4xx+).
866 static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
872 * cail_reg_write - write MMIO register
874 * @info: atom card_info pointer
875 * @reg: MMIO register offset
876 * @val: value to write to the MMIO register
878 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
880 static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
882 struct amdgpu_device *adev = info->dev->dev_private;
888 * cail_reg_read - read MMIO register
890 * @info: atom card_info pointer
891 * @reg: MMIO register offset
893 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
894 * Returns the value of the MMIO register.
896 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
898 struct amdgpu_device *adev = info->dev->dev_private;
906 * cail_ioreg_write - write IO register
908 * @info: atom card_info pointer
909 * @reg: IO register offset
910 * @val: value to write to the IO register
912 * Provides an IO register accessor for the atom interpreter (r4xx+).
914 static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
916 struct amdgpu_device *adev = info->dev->dev_private;
922 * cail_ioreg_read - read IO register
924 * @info: atom card_info pointer
925 * @reg: IO register offset
927 * Provides an IO register accessor for the atom interpreter (r4xx+).
928 * Returns the value of the IO register.
930 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
932 struct amdgpu_device *adev = info->dev->dev_private;
940 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
942 * @adev: amdgpu_device pointer
944 * Frees the driver info and register access callbacks for the ATOM
945 * interpreter (r4xx+).
946 * Called at driver shutdown.
948 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
950 if (adev->mode_info.atom_context) {
951 kfree(adev->mode_info.atom_context->scratch);
952 kfree(adev->mode_info.atom_context->iio);
954 kfree(adev->mode_info.atom_context);
955 adev->mode_info.atom_context = NULL;
956 kfree(adev->mode_info.atom_card_info);
957 adev->mode_info.atom_card_info = NULL;
961 * amdgpu_atombios_init - init the driver info and callbacks for atombios
963 * @adev: amdgpu_device pointer
965 * Initializes the driver info and register access callbacks for the
966 * ATOM interpreter (r4xx+).
967 * Returns 0 on success, -ENOMEM on failure.
968 * Called at driver startup.
970 static int amdgpu_atombios_init(struct amdgpu_device *adev)
972 struct card_info *atom_card_info =
973 kzalloc(sizeof(struct card_info), GFP_KERNEL);
978 adev->mode_info.atom_card_info = atom_card_info;
979 atom_card_info->dev = adev->ddev;
980 atom_card_info->reg_read = cail_reg_read;
981 atom_card_info->reg_write = cail_reg_write;
982 /* needed for iio ops */
984 atom_card_info->ioreg_read = cail_ioreg_read;
985 atom_card_info->ioreg_write = cail_ioreg_write;
987 DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
988 atom_card_info->ioreg_read = cail_reg_read;
989 atom_card_info->ioreg_write = cail_reg_write;
991 atom_card_info->mc_read = cail_mc_read;
992 atom_card_info->mc_write = cail_mc_write;
993 atom_card_info->pll_read = cail_pll_read;
994 atom_card_info->pll_write = cail_pll_write;
996 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
997 if (!adev->mode_info.atom_context) {
998 amdgpu_atombios_fini(adev);
1002 mutex_init(&adev->mode_info.atom_context->mutex);
1003 if (adev->is_atom_fw) {
1004 amdgpu_atomfirmware_scratch_regs_init(adev);
1005 amdgpu_atomfirmware_allocate_fb_scratch(adev);
1007 amdgpu_atombios_scratch_regs_init(adev);
1008 amdgpu_atombios_allocate_fb_scratch(adev);
1013 /* if we get transitioned to only one device, take VGA back */
1015 * amdgpu_vga_set_decode - enable/disable vga decode
1017 * @cookie: amdgpu_device pointer
1018 * @state: enable/disable vga decode
1020 * Enable/disable vga decode (all asics).
1021 * Returns VGA resource flags.
1023 static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
1025 struct amdgpu_device *adev = cookie;
1026 amdgpu_asic_set_vga_state(adev, state);
1028 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1029 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1031 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1035 * amdgpu_check_pot_argument - check that argument is a power of two
1037 * @arg: value to check
1039 * Validates that a certain argument is a power of two (all asics).
1040 * Returns true if argument is valid.
1042 static bool amdgpu_check_pot_argument(int arg)
1044 return (arg & (arg - 1)) == 0;
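/*
 * Worked example: 256 & 255 == 0, so 256 passes; 24 & 23 == 16, so 24 is
 * rejected.  Note that 0 also satisfies the test as written.
 */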
1047 static void amdgpu_check_block_size(struct amdgpu_device *adev)
1049 /* defines number of bits in page table versus page directory,
1050 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1051 * page table and the remaining bits are in the page directory */
1052 if (amdgpu_vm_block_size == -1)
1055 if (amdgpu_vm_block_size < 9) {
1056 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1057 amdgpu_vm_block_size);
1061 if (amdgpu_vm_block_size > 24 ||
1062 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1063 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1064 amdgpu_vm_block_size);
1071 amdgpu_vm_block_size = -1;
1074 static void amdgpu_check_vm_size(struct amdgpu_device *adev)
1076 /* no need to check the default value */
1077 if (amdgpu_vm_size == -1)
1080 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
1081 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
1086 if (amdgpu_vm_size < 1) {
1087 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1093 * Max GPUVM size for Cayman, SI, CI and VI is 40 bits.
1095 if (amdgpu_vm_size > 1024) {
1096 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
1104 amdgpu_vm_size = -1;
1108 * amdgpu_check_arguments - validate module params
1110 * @adev: amdgpu_device pointer
1112 * Validates certain module parameters and updates
1113 * the associated values used by the driver (all asics).
1115 static void amdgpu_check_arguments(struct amdgpu_device *adev)
1117 if (amdgpu_sched_jobs < 4) {
1118 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1120 amdgpu_sched_jobs = 4;
1121 } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
1122 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1124 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1127 if (amdgpu_gart_size != -1) {
1128 /* gtt size must be greater than or equal to 32M */
1129 if (amdgpu_gart_size < 32) {
1130 dev_warn(adev->dev, "gart size (%d) too small\n",
1132 amdgpu_gart_size = -1;
1136 amdgpu_check_vm_size(adev);
1138 amdgpu_check_block_size(adev);
1140 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1141 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1142 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1143 amdgpu_vram_page_split);
1144 amdgpu_vram_page_split = 1024;
1149 * amdgpu_switcheroo_set_state - set switcheroo state
1151 * @pdev: pci dev pointer
1152 * @state: vga_switcheroo state
1154 * Callback for the switcheroo driver. Suspends or resumes
1155 * the asics before or after they are powered up using ACPI methods.
1157 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1159 struct drm_device *dev = pci_get_drvdata(pdev);
1161 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1164 if (state == VGA_SWITCHEROO_ON) {
1165 unsigned d3_delay = dev->pdev->d3_delay;
1167 pr_info("amdgpu: switched on\n");
1168 /* don't suspend or resume card normally */
1169 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1171 amdgpu_device_resume(dev, true, true);
1173 dev->pdev->d3_delay = d3_delay;
1175 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1176 drm_kms_helper_poll_enable(dev);
1178 pr_info("amdgpu: switched off\n");
1179 drm_kms_helper_poll_disable(dev);
1180 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1181 amdgpu_device_suspend(dev, true, true);
1182 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1187 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1189 * @pdev: pci dev pointer
1191 * Callback for the switcheroo driver. Checks if the switcheroo
1192 * state can be changed.
1193 * Returns true if the state can be changed, false if not.
1195 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1197 struct drm_device *dev = pci_get_drvdata(pdev);
1200 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1201 * locking inversion with the driver load path. And the access here is
1202 * completely racy anyway. So don't bother with locking for now.
1204 return dev->open_count == 0;
1207 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1208 .set_gpu_state = amdgpu_switcheroo_set_state,
1210 .can_switch = amdgpu_switcheroo_can_switch,
1213 int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1214 enum amd_ip_block_type block_type,
1215 enum amd_clockgating_state state)
1219 for (i = 0; i < adev->num_ip_blocks; i++) {
1220 if (!adev->ip_blocks[i].status.valid)
1222 if (adev->ip_blocks[i].version->type != block_type)
1224 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1226 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1227 (void *)adev, state);
1229 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1230 adev->ip_blocks[i].version->funcs->name, r);
1235 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1236 enum amd_ip_block_type block_type,
1237 enum amd_powergating_state state)
1241 for (i = 0; i < adev->num_ip_blocks; i++) {
1242 if (!adev->ip_blocks[i].status.valid)
1244 if (adev->ip_blocks[i].version->type != block_type)
1246 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1248 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1249 (void *)adev, state);
1251 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1252 adev->ip_blocks[i].version->funcs->name, r);
1257 void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
1261 for (i = 0; i < adev->num_ip_blocks; i++) {
1262 if (!adev->ip_blocks[i].status.valid)
1264 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1265 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1269 int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1270 enum amd_ip_block_type block_type)
1274 for (i = 0; i < adev->num_ip_blocks; i++) {
1275 if (!adev->ip_blocks[i].status.valid)
1277 if (adev->ip_blocks[i].version->type == block_type) {
1278 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1288 bool amdgpu_is_idle(struct amdgpu_device *adev,
1289 enum amd_ip_block_type block_type)
1293 for (i = 0; i < adev->num_ip_blocks; i++) {
1294 if (!adev->ip_blocks[i].status.valid)
1296 if (adev->ip_blocks[i].version->type == block_type)
1297 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1303 struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1304 enum amd_ip_block_type type)
1308 for (i = 0; i < adev->num_ip_blocks; i++)
1309 if (adev->ip_blocks[i].version->type == type)
1310 return &adev->ip_blocks[i];
1316 * amdgpu_ip_block_version_cmp
1318 * @adev: amdgpu_device pointer
1319 * @type: enum amd_ip_block_type
1320 * @major: major version
1321 * @minor: minor version
1323 * return 0 if equal or greater
1324 * return 1 if smaller or the ip_block doesn't exist
1326 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1327 enum amd_ip_block_type type,
1328 u32 major, u32 minor)
1330 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1332 if (ip_block && ((ip_block->version->major > major) ||
1333 ((ip_block->version->major == major) &&
1334 (ip_block->version->minor >= minor))))
1341 * amdgpu_ip_block_add
1343 * @adev: amdgpu_device pointer
1344 * @ip_block_version: pointer to the IP to add
1346 * Adds the IP block driver information to the collection of IPs
1349 int amdgpu_ip_block_add(struct amdgpu_device *adev,
1350 const struct amdgpu_ip_block_version *ip_block_version)
1352 if (!ip_block_version)
1355 DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
1356 ip_block_version->funcs->name);
1358 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
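/*
 * Usage sketch (illustrative, not from the original code): an ASIC's
 * *_set_ip_blocks() routine registers its IP blocks in initialization order,
 * roughly as below (the ip_block_version symbols are placeholders):
 *
 *	amdgpu_ip_block_add(adev, &example_common_ip_block);
 *	amdgpu_ip_block_add(adev, &example_gmc_ip_block);
 *	amdgpu_ip_block_add(adev, &example_ih_ip_block);
 *	amdgpu_ip_block_add(adev, &example_gfx_ip_block);
 *
 * The order matters: amdgpu_init() below walks adev->ip_blocks forward for
 * init, while amdgpu_fini() walks it backward for teardown.
 */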
1363 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1365 adev->enable_virtual_display = false;
1367 if (amdgpu_virtual_display) {
1368 struct drm_device *ddev = adev->ddev;
1369 const char *pci_address_name = pci_name(ddev->pdev);
1370 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1372 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1373 pciaddstr_tmp = pciaddstr;
1374 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1375 pciaddname = strsep(&pciaddname_tmp, ",");
1376 if (!strcmp("all", pciaddname)
1377 || !strcmp(pci_address_name, pciaddname)) {
1381 adev->enable_virtual_display = true;
1384 res = kstrtol(pciaddname_tmp, 10,
1392 adev->mode_info.num_crtc = num_crtc;
1394 adev->mode_info.num_crtc = 1;
1400 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1401 amdgpu_virtual_display, pci_address_name,
1402 adev->enable_virtual_display, adev->mode_info.num_crtc);
1408 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1410 const char *chip_name;
1413 const struct gpu_info_firmware_header_v1_0 *hdr;
1415 adev->firmware.gpu_info_fw = NULL;
1417 switch (adev->asic_type) {
1421 case CHIP_POLARIS11:
1422 case CHIP_POLARIS10:
1423 case CHIP_POLARIS12:
1426 #ifdef CONFIG_DRM_AMDGPU_SI
1433 #ifdef CONFIG_DRM_AMDGPU_CIK
1443 chip_name = "vega10";
1446 chip_name = "raven";
1450 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1451 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1454 "Failed to load gpu_info firmware \"%s\"\n",
1458 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1461 "Failed to validate gpu_info firmware \"%s\"\n",
1466 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1467 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1469 switch (hdr->version_major) {
1472 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1473 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1474 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1476 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1477 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1478 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1479 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1480 adev->gfx.config.max_texture_channel_caches =
1481 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1482 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1483 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1484 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1485 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1486 adev->gfx.config.double_offchip_lds_buf =
1487 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1488 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1489 adev->gfx.cu_info.max_waves_per_simd =
1490 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1491 adev->gfx.cu_info.max_scratch_slots_per_cu =
1492 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1493 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1498 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1506 static int amdgpu_early_init(struct amdgpu_device *adev)
1510 amdgpu_device_enable_virtual_display(adev);
1512 switch (adev->asic_type) {
1516 case CHIP_POLARIS11:
1517 case CHIP_POLARIS10:
1518 case CHIP_POLARIS12:
1521 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1522 adev->family = AMDGPU_FAMILY_CZ;
1524 adev->family = AMDGPU_FAMILY_VI;
1526 r = vi_set_ip_blocks(adev);
1530 #ifdef CONFIG_DRM_AMDGPU_SI
1536 adev->family = AMDGPU_FAMILY_SI;
1537 r = si_set_ip_blocks(adev);
1542 #ifdef CONFIG_DRM_AMDGPU_CIK
1548 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1549 adev->family = AMDGPU_FAMILY_CI;
1551 adev->family = AMDGPU_FAMILY_KV;
1553 r = cik_set_ip_blocks(adev);
1560 if (adev->asic_type == CHIP_RAVEN)
1561 adev->family = AMDGPU_FAMILY_RV;
1563 adev->family = AMDGPU_FAMILY_AI;
1565 r = soc15_set_ip_blocks(adev);
1570 /* FIXME: not supported yet */
1574 r = amdgpu_device_parse_gpu_info_fw(adev);
1578 if (amdgpu_sriov_vf(adev)) {
1579 r = amdgpu_virt_request_full_gpu(adev, true);
1584 for (i = 0; i < adev->num_ip_blocks; i++) {
1585 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1586 DRM_ERROR("disabled ip block: %d <%s>\n",
1587 i, adev->ip_blocks[i].version->funcs->name);
1588 adev->ip_blocks[i].status.valid = false;
1590 if (adev->ip_blocks[i].version->funcs->early_init) {
1591 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1593 adev->ip_blocks[i].status.valid = false;
1595 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1596 adev->ip_blocks[i].version->funcs->name, r);
1599 adev->ip_blocks[i].status.valid = true;
1602 adev->ip_blocks[i].status.valid = true;
1607 adev->cg_flags &= amdgpu_cg_mask;
1608 adev->pg_flags &= amdgpu_pg_mask;
1613 static int amdgpu_init(struct amdgpu_device *adev)
1617 for (i = 0; i < adev->num_ip_blocks; i++) {
1618 if (!adev->ip_blocks[i].status.valid)
1620 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1622 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1623 adev->ip_blocks[i].version->funcs->name, r);
1626 adev->ip_blocks[i].status.sw = true;
1627 /* need to do gmc hw init early so we can allocate gpu mem */
1628 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1629 r = amdgpu_vram_scratch_init(adev);
1631 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1634 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1636 DRM_ERROR("hw_init %d failed %d\n", i, r);
1639 r = amdgpu_wb_init(adev);
1641 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1644 adev->ip_blocks[i].status.hw = true;
1646 /* right after GMC hw init, we create CSA */
1647 if (amdgpu_sriov_vf(adev)) {
1648 r = amdgpu_allocate_static_csa(adev);
1650 DRM_ERROR("allocate CSA failed %d\n", r);
1657 for (i = 0; i < adev->num_ip_blocks; i++) {
1658 if (!adev->ip_blocks[i].status.sw)
1660 /* gmc hw init is done early */
1661 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1663 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1665 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1666 adev->ip_blocks[i].version->funcs->name, r);
1669 adev->ip_blocks[i].status.hw = true;
1675 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
1677 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1680 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
1682 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1683 AMDGPU_RESET_MAGIC_NUM);
1686 static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
1690 for (i = 0; i < adev->num_ip_blocks; i++) {
1691 if (!adev->ip_blocks[i].status.valid)
1693 /* skip CG for VCE/UVD, it's handled specially */
1694 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1695 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1696 /* enable clockgating to save power */
1697 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1700 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1701 adev->ip_blocks[i].version->funcs->name, r);
1709 static int amdgpu_late_init(struct amdgpu_device *adev)
1713 for (i = 0; i < adev->num_ip_blocks; i++) {
1714 if (!adev->ip_blocks[i].status.valid)
1716 if (adev->ip_blocks[i].version->funcs->late_init) {
1717 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1719 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1720 adev->ip_blocks[i].version->funcs->name, r);
1723 adev->ip_blocks[i].status.late_initialized = true;
1727 mod_delayed_work(system_wq, &adev->late_init_work,
1728 msecs_to_jiffies(AMDGPU_RESUME_MS));
1730 amdgpu_fill_reset_magic(adev);
1735 static int amdgpu_fini(struct amdgpu_device *adev)
1739 /* need to disable SMC first */
1740 for (i = 0; i < adev->num_ip_blocks; i++) {
1741 if (!adev->ip_blocks[i].status.hw)
1743 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1744 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1745 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1746 AMD_CG_STATE_UNGATE);
1748 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1749 adev->ip_blocks[i].version->funcs->name, r);
1752 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1753 /* XXX handle errors */
1755 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1756 adev->ip_blocks[i].version->funcs->name, r);
1758 adev->ip_blocks[i].status.hw = false;
1763 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1764 if (!adev->ip_blocks[i].status.hw)
1766 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1767 amdgpu_wb_fini(adev);
1768 amdgpu_vram_scratch_fini(adev);
1771 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1772 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1773 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1774 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1775 AMD_CG_STATE_UNGATE);
1777 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1778 adev->ip_blocks[i].version->funcs->name, r);
1783 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1784 /* XXX handle errors */
1786 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1787 adev->ip_blocks[i].version->funcs->name, r);
1790 adev->ip_blocks[i].status.hw = false;
1793 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1794 if (!adev->ip_blocks[i].status.sw)
1796 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1797 /* XXX handle errors */
1799 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1800 adev->ip_blocks[i].version->funcs->name, r);
1802 adev->ip_blocks[i].status.sw = false;
1803 adev->ip_blocks[i].status.valid = false;
1806 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1807 if (!adev->ip_blocks[i].status.late_initialized)
1809 if (adev->ip_blocks[i].version->funcs->late_fini)
1810 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1811 adev->ip_blocks[i].status.late_initialized = false;
1814 if (amdgpu_sriov_vf(adev)) {
1815 amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
1816 amdgpu_virt_release_full_gpu(adev, false);
1822 static void amdgpu_late_init_func_handler(struct work_struct *work)
1824 struct amdgpu_device *adev =
1825 container_of(work, struct amdgpu_device, late_init_work.work);
1826 amdgpu_late_set_cg_state(adev);
1829 int amdgpu_suspend(struct amdgpu_device *adev)
1833 if (amdgpu_sriov_vf(adev))
1834 amdgpu_virt_request_full_gpu(adev, false);
1836 /* ungate SMC block first */
1837 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1838 AMD_CG_STATE_UNGATE);
1840 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
1843 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1844 if (!adev->ip_blocks[i].status.valid)
1846 /* ungate blocks so that suspend can properly shut them down */
1847 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
1848 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1849 AMD_CG_STATE_UNGATE);
1851 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1852 adev->ip_blocks[i].version->funcs->name, r);
1855 /* XXX handle errors */
1856 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1857 /* XXX handle errors */
1859 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1860 adev->ip_blocks[i].version->funcs->name, r);
1864 if (amdgpu_sriov_vf(adev))
1865 amdgpu_virt_release_full_gpu(adev, false);
1870 static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
1874 static enum amd_ip_block_type ip_order[] = {
1875 AMD_IP_BLOCK_TYPE_GMC,
1876 AMD_IP_BLOCK_TYPE_COMMON,
1877 AMD_IP_BLOCK_TYPE_IH,
1880 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1882 struct amdgpu_ip_block *block;
1884 for (j = 0; j < adev->num_ip_blocks; j++) {
1885 block = &adev->ip_blocks[j];
1887 if (block->version->type != ip_order[i] ||
1888 !block->status.valid)
1891 r = block->version->funcs->hw_init(adev);
1892 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1899 static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
1903 static enum amd_ip_block_type ip_order[] = {
1904 AMD_IP_BLOCK_TYPE_SMC,
1905 AMD_IP_BLOCK_TYPE_DCE,
1906 AMD_IP_BLOCK_TYPE_GFX,
1907 AMD_IP_BLOCK_TYPE_SDMA,
1908 AMD_IP_BLOCK_TYPE_VCE,
1911 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1913 struct amdgpu_ip_block *block;
1915 for (j = 0; j < adev->num_ip_blocks; j++) {
1916 block = &adev->ip_blocks[j];
1918 if (block->version->type != ip_order[i] ||
1919 !block->status.valid)
1922 r = block->version->funcs->hw_init(adev);
1923 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
1930 static int amdgpu_resume_phase1(struct amdgpu_device *adev)
1934 for (i = 0; i < adev->num_ip_blocks; i++) {
1935 if (!adev->ip_blocks[i].status.valid)
1937 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1938 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1939 adev->ip_blocks[i].version->type ==
1940 AMD_IP_BLOCK_TYPE_IH) {
1941 r = adev->ip_blocks[i].version->funcs->resume(adev);
1943 DRM_ERROR("resume of IP block <%s> failed %d\n",
1944 adev->ip_blocks[i].version->funcs->name, r);
1953 static int amdgpu_resume_phase2(struct amdgpu_device *adev)
1957 for (i = 0; i < adev->num_ip_blocks; i++) {
1958 if (!adev->ip_blocks[i].status.valid)
1960 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1961 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
1962 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
1964 r = adev->ip_blocks[i].version->funcs->resume(adev);
1966 DRM_ERROR("resume of IP block <%s> failed %d\n",
1967 adev->ip_blocks[i].version->funcs->name, r);
1975 static int amdgpu_resume(struct amdgpu_device *adev)
1979 r = amdgpu_resume_phase1(adev);
1982 r = amdgpu_resume_phase2(adev);
1987 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1989 if (adev->is_atom_fw) {
1990 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
1991 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1993 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1994 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1999 * amdgpu_device_init - initialize the driver
2001 * @adev: amdgpu_device pointer
2002 * @ddev: drm dev pointer
2003 * @pdev: pci dev pointer
2004 * @flags: driver flags
2006 * Initializes the driver info and hw (all asics).
2007 * Returns 0 for success or an error on failure.
2008 * Called at driver startup.
2010 int amdgpu_device_init(struct amdgpu_device *adev,
2011 struct drm_device *ddev,
2012 struct pci_dev *pdev,
2016 bool runtime = false;
2019 adev->shutdown = false;
2020 adev->dev = &pdev->dev;
2023 adev->flags = flags;
2024 adev->asic_type = flags & AMD_ASIC_MASK;
2025 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
2026 adev->mc.gtt_size = 512 * 1024 * 1024;
2027 adev->accel_working = false;
2028 adev->num_rings = 0;
2029 adev->mman.buffer_funcs = NULL;
2030 adev->mman.buffer_funcs_ring = NULL;
2031 adev->vm_manager.vm_pte_funcs = NULL;
2032 adev->vm_manager.vm_pte_num_rings = 0;
2033 adev->gart.gart_funcs = NULL;
2034 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2036 adev->smc_rreg = &amdgpu_invalid_rreg;
2037 adev->smc_wreg = &amdgpu_invalid_wreg;
2038 adev->pcie_rreg = &amdgpu_invalid_rreg;
2039 adev->pcie_wreg = &amdgpu_invalid_wreg;
2040 adev->pciep_rreg = &amdgpu_invalid_rreg;
2041 adev->pciep_wreg = &amdgpu_invalid_wreg;
2042 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2043 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2044 adev->didt_rreg = &amdgpu_invalid_rreg;
2045 adev->didt_wreg = &amdgpu_invalid_wreg;
2046 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2047 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
2048 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2049 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2052 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2053 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2054 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
2056 /* mutex initialization are all done here so we
2057 * can recall function without having locking issues */
2058 atomic_set(&adev->irq.ih.lock, 0);
2059 mutex_init(&adev->firmware.mutex);
2060 mutex_init(&adev->pm.mutex);
2061 mutex_init(&adev->gfx.gpu_clock_mutex);
2062 mutex_init(&adev->srbm_mutex);
2063 mutex_init(&adev->grbm_idx_mutex);
2064 mutex_init(&adev->mn_lock);
2065 hash_init(adev->mn_hash);
2067 amdgpu_check_arguments(adev);
2069 spin_lock_init(&adev->mmio_idx_lock);
2070 spin_lock_init(&adev->smc_idx_lock);
2071 spin_lock_init(&adev->pcie_idx_lock);
2072 spin_lock_init(&adev->uvd_ctx_idx_lock);
2073 spin_lock_init(&adev->didt_idx_lock);
2074 spin_lock_init(&adev->gc_cac_idx_lock);
2075 spin_lock_init(&adev->audio_endpt_idx_lock);
2076 spin_lock_init(&adev->mm_stats.lock);
2078 INIT_LIST_HEAD(&adev->shadow_list);
2079 mutex_init(&adev->shadow_list_lock);
2081 INIT_LIST_HEAD(&adev->gtt_list);
2082 spin_lock_init(&adev->gtt_list_lock);
2084 INIT_LIST_HEAD(&adev->ring_lru_list);
2085 spin_lock_init(&adev->ring_lru_list_lock);
2087 INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
2089 /* Registers mapping */
2090 /* TODO: block userspace mapping of io register */
2091 if (adev->asic_type >= CHIP_BONAIRE) {
2092 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2093 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2095 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2096 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2099 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2100 if (adev->rmmio == NULL) {
2103 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2104 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2106 if (adev->asic_type >= CHIP_BONAIRE)
2107 /* doorbell bar mapping */
2108 amdgpu_doorbell_init(adev);
2110 /* io port mapping */
2111 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2112 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2113 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2114 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2118 if (adev->rio_mem == NULL)
2119 DRM_INFO("PCI I/O BAR is not found.\n");
2121 /* early init functions */
2122 r = amdgpu_early_init(adev);
2126 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2127 /* this will fail for cards that aren't VGA class devices, just
2129 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
2131 if (amdgpu_runtime_pm == 1)
2133 if (amdgpu_device_is_px(ddev))
2135 if (!pci_is_thunderbolt_attached(adev->pdev))
2136 vga_switcheroo_register_client(adev->pdev,
2137 &amdgpu_switcheroo_ops, runtime);
2139 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2142 if (!amdgpu_get_bios(adev)) {
2147 r = amdgpu_atombios_init(adev);
2149 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2153 /* detect if we are with an SRIOV vbios */
2154 amdgpu_device_detect_sriov_bios(adev);
2156 /* Post card if necessary */
2157 if (amdgpu_vpost_needed(adev)) {
2159 dev_err(adev->dev, "no vBIOS found\n");
2163 DRM_INFO("GPU posting now...\n");
2164 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2166 dev_err(adev->dev, "gpu post error!\n");
2170 DRM_INFO("GPU post is not needed\n");
2173 if (!adev->is_atom_fw) {
2174 /* Initialize clocks */
2175 r = amdgpu_atombios_get_clock_info(adev);
2177 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
2180 /* init i2c buses */
2181 amdgpu_atombios_i2c_init(adev);
2185 r = amdgpu_fence_driver_init(adev);
2187 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
2191 /* init the mode config */
2192 drm_mode_config_init(adev->ddev);
2194 r = amdgpu_init(adev);
2196 dev_err(adev->dev, "amdgpu_init failed\n");
2201 adev->accel_working = true;
2203 amdgpu_vm_check_compute_bug(adev);
2205 /* Initialize the buffer migration limit. */
2206 if (amdgpu_moverate >= 0)
2207 max_MBps = amdgpu_moverate;
2209 max_MBps = 8; /* Allow 8 MB/s. */
2210 /* Get a log2 for easy divisions. */
2211 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2213 r = amdgpu_ib_pool_init(adev);
2215 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2219 r = amdgpu_ib_ring_tests(adev);
2221 DRM_ERROR("ib ring test failed (%d).\n", r);
2223 amdgpu_fbdev_init(adev);
2225 r = amdgpu_gem_debugfs_init(adev);
2227 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
2229 r = amdgpu_debugfs_regs_init(adev);
2231 DRM_ERROR("registering register debugfs failed (%d).\n", r);
2233 r = amdgpu_debugfs_test_ib_ring_init(adev);
2235 DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
2237 r = amdgpu_debugfs_firmware_init(adev);
2239 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
2241 if ((amdgpu_testing & 1)) {
2242 if (adev->accel_working)
2243 amdgpu_test_moves(adev);
2245 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2247 if (amdgpu_benchmarking) {
2248 if (adev->accel_working)
2249 amdgpu_benchmark(adev, amdgpu_benchmarking);
2251 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2254 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2255 * explicit gating rather than handling it automatically.
2257 r = amdgpu_late_init(adev);
2259 dev_err(adev->dev, "amdgpu_late_init failed\n");
2267 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2272 * amdgpu_device_fini - tear down the driver
2274 * @adev: amdgpu_device pointer
2276 * Tear down the driver info (all asics).
2277 * Called at driver shutdown.
2279 void amdgpu_device_fini(struct amdgpu_device *adev)
2283 DRM_INFO("amdgpu: finishing device.\n");
2284 adev->shutdown = true;
2285 if (adev->mode_info.mode_config_initialized)
2286 drm_crtc_force_disable_all(adev->ddev);
2287 /* evict vram memory */
2288 amdgpu_bo_evict_vram(adev);
2289 amdgpu_ib_pool_fini(adev);
2290 amdgpu_fence_driver_fini(adev);
2291 amdgpu_fbdev_fini(adev);
2292 r = amdgpu_fini(adev);
2293 if (adev->firmware.gpu_info_fw) {
2294 release_firmware(adev->firmware.gpu_info_fw);
2295 adev->firmware.gpu_info_fw = NULL;
2297 adev->accel_working = false;
2298 cancel_delayed_work_sync(&adev->late_init_work);
2299 /* free i2c buses */
2300 amdgpu_i2c_fini(adev);
2301 amdgpu_atombios_fini(adev);
2304 if (!pci_is_thunderbolt_attached(adev->pdev))
2305 vga_switcheroo_unregister_client(adev->pdev);
2306 if (adev->flags & AMD_IS_PX)
2307 vga_switcheroo_fini_domain_pm_ops(adev->dev);
2308 vga_client_register(adev->pdev, NULL, NULL, NULL);
2310 pci_iounmap(adev->pdev, adev->rio_mem);
2311 adev->rio_mem = NULL;
2312 iounmap(adev->rmmio);
2314 if (adev->asic_type >= CHIP_BONAIRE)
2315 amdgpu_doorbell_fini(adev);
2316 amdgpu_debugfs_regs_cleanup(adev);
2324 * amdgpu_device_suspend - initiate device suspend
2326 * @dev: drm dev pointer
2327 * @suspend: suspend state
2329 * Puts the hw in the suspend state (all asics).
2330 * Returns 0 for success or an error on failure.
2331 * Called at driver suspend.
2333 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2335 struct amdgpu_device *adev;
2336 struct drm_crtc *crtc;
2337 struct drm_connector *connector;
2340 if (dev == NULL || dev->dev_private == NULL) {
2344 adev = dev->dev_private;
2346 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2349 drm_kms_helper_poll_disable(dev);
2351 /* turn off display hw */
2352 drm_modeset_lock_all(dev);
2353 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2354 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2356 drm_modeset_unlock_all(dev);
2358 /* unpin the front buffers and cursors */
2359 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2360 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2361 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
2362 struct amdgpu_bo *robj;
2364 if (amdgpu_crtc->cursor_bo) {
2365 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2366 r = amdgpu_bo_reserve(aobj, true);
2368 amdgpu_bo_unpin(aobj);
2369 amdgpu_bo_unreserve(aobj);
2373 if (rfb == NULL || rfb->obj == NULL) {
2376 robj = gem_to_amdgpu_bo(rfb->obj);
2377 /* don't unpin kernel fb objects */
2378 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
2379 r = amdgpu_bo_reserve(robj, true);
2381 amdgpu_bo_unpin(robj);
2382 amdgpu_bo_unreserve(robj);
2386 /* evict vram memory */
2387 amdgpu_bo_evict_vram(adev);
2389 amdgpu_fence_driver_suspend(adev);
2391 r = amdgpu_suspend(adev);
2393 /* evict remaining vram memory
2394 * This second call to evict vram is to evict the gart page table
2397 amdgpu_bo_evict_vram(adev);
2399 if (adev->is_atom_fw)
2400 amdgpu_atomfirmware_scratch_regs_save(adev);
2402 amdgpu_atombios_scratch_regs_save(adev);
2403 pci_save_state(dev->pdev);
2405 /* Shut down the device */
2406 pci_disable_device(dev->pdev);
2407 pci_set_power_state(dev->pdev, PCI_D3hot);
2409 r = amdgpu_asic_reset(adev);
2411 DRM_ERROR("amdgpu asic reset failed\n");
2416 amdgpu_fbdev_set_suspend(adev, 1);
2423 * amdgpu_device_resume - initiate device resume
2425 * @dev: drm dev pointer
2427 * Bring the hw back to operating state (all asics).
2428 * Returns 0 for success or an error on failure.
2429 * Called at driver resume.
2431 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2433 struct drm_connector *connector;
2434 struct amdgpu_device *adev = dev->dev_private;
2435 struct drm_crtc *crtc;
2438 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2445 pci_set_power_state(dev->pdev, PCI_D0);
2446 pci_restore_state(dev->pdev);
2447 r = pci_enable_device(dev->pdev);
2451 if (adev->is_atom_fw)
2452 amdgpu_atomfirmware_scratch_regs_restore(adev);
2454 amdgpu_atombios_scratch_regs_restore(adev);
2457 if (amdgpu_need_post(adev)) {
2458 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2460 DRM_ERROR("amdgpu asic init failed\n");
2463 r = amdgpu_resume(adev);
2465 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2468 amdgpu_fence_driver_resume(adev);
2471 r = amdgpu_ib_ring_tests(adev);
2473 DRM_ERROR("ib ring test failed (%d).\n", r);
2476 r = amdgpu_late_init(adev);
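/* re-pin the cursor BOs into VRAM; they were unpinned in
 * amdgpu_device_suspend() and the display code expects cursor_addr to point
 * at a valid VRAM address again */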
2481 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2482 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2484 if (amdgpu_crtc->cursor_bo) {
2485 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2486 r = amdgpu_bo_reserve(aobj, true);
2488 r = amdgpu_bo_pin(aobj,
2489 AMDGPU_GEM_DOMAIN_VRAM,
2490 &amdgpu_crtc->cursor_addr);
2492 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2493 amdgpu_bo_unreserve(aobj);
2498 /* blat the mode back in */
2500 drm_helper_resume_force_mode(dev);
2501 /* turn on display hw */
2502 drm_modeset_lock_all(dev);
2503 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2504 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2506 drm_modeset_unlock_all(dev);
2509 drm_kms_helper_poll_enable(dev);
2512 * Most of the connector probing functions try to acquire runtime pm
2513 * refs to ensure that the GPU is powered on when connector polling is
2514 * performed. Since we're calling this from a runtime PM callback,
2515 * trying to acquire rpm refs will cause us to deadlock.
2517 * Since we're guaranteed to be holding the rpm lock, it's safe to
2518 * temporarily disable the rpm helpers so this doesn't deadlock us.
2521 dev->dev->power.disable_depth++;
2523 drm_helper_hpd_irq_event(dev);
2525 dev->dev->power.disable_depth--;
2529 amdgpu_fbdev_set_suspend(adev, 0);
2538 static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2541 bool asic_hang = false;
2543 for (i = 0; i < adev->num_ip_blocks; i++) {
2544 if (!adev->ip_blocks[i].status.valid)
2546 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2547 adev->ip_blocks[i].status.hang =
2548 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2549 if (adev->ip_blocks[i].status.hang) {
2550 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2557 static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2561 for (i = 0; i < adev->num_ip_blocks; i++) {
2562 if (!adev->ip_blocks[i].status.valid)
2564 if (adev->ip_blocks[i].status.hang &&
2565 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2566 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2575 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2579 for (i = 0; i < adev->num_ip_blocks; i++) {
2580 if (!adev->ip_blocks[i].status.valid)
2582 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2583 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2584 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2585 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2586 if (adev->ip_blocks[i].status.hang) {
2587 DRM_INFO("Some block need full reset!\n");
2595 static int amdgpu_soft_reset(struct amdgpu_device *adev)
2599 for (i = 0; i < adev->num_ip_blocks; i++) {
2600 if (!adev->ip_blocks[i].status.valid)
2602 if (adev->ip_blocks[i].status.hang &&
2603 adev->ip_blocks[i].version->funcs->soft_reset) {
2604 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2613 static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2617 for (i = 0; i < adev->num_ip_blocks; i++) {
2618 if (!adev->ip_blocks[i].status.valid)
2620 if (adev->ip_blocks[i].status.hang &&
2621 adev->ip_blocks[i].version->funcs->post_soft_reset)
2622 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2630 bool amdgpu_need_backup(struct amdgpu_device *adev)
2632 if (adev->flags & AMD_IS_APU)
2635 return amdgpu_lockup_timeout > 0;
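/*
 * Copy a buffer's contents back from its shadow after a GPU reset.  VRAM
 * cannot be trusted after a full reset, so buffers on the shadow list
 * (notably VM page tables) keep a shadow copy that is validated, re-bound
 * to the GART and DMA-copied back into VRAM here; the returned fence
 * signals when the copy has finished.
 */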
2638 static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2639 struct amdgpu_ring *ring,
2640 struct amdgpu_bo *bo,
2641 struct dma_fence **fence)
2649 r = amdgpu_bo_reserve(bo, true);
2652 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2653 /* if bo has been evicted, then no need to recover */
2654 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2655 r = amdgpu_bo_validate(bo->shadow);
2657 DRM_ERROR("bo validate failed!\n");
2661 r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
2663 DRM_ERROR("%p bind failed\n", bo->shadow);
2667 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2670 DRM_ERROR("recover page table failed!\n");
2675 amdgpu_bo_unreserve(bo);
2680 * amdgpu_sriov_gpu_reset - reset the asic
2682 * @adev: amdgpu device pointer
2683 * @job: the job that triggered the hang
2685 * Attempt to reset the GPU if it has hung (all asics).
2687 * Returns 0 for success or an error on failure.
2689 int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
2693 struct amdgpu_bo *bo, *tmp;
2694 struct amdgpu_ring *ring;
2695 struct dma_fence *fence = NULL, *next = NULL;
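/* rough sequence of an SR-IOV (VF) reset, mirroring the steps below: park
 * the schedulers, request full GPU access from the host and reset, re-init
 * the early IP blocks, recover the GART, re-init the remaining blocks, run
 * the IB tests, restore shadowed VRAM buffers and restart the schedulers */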
2697 mutex_lock(&adev->virt.lock_reset);
2698 atomic_inc(&adev->gpu_reset_counter);
2699 adev->gfx.in_reset = true;
2702 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2704 /* start from the ring that triggered the GPU hang */
2705 j = job ? job->ring->idx : 0;
2707 /* block scheduler */
2708 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2709 ring = adev->rings[i % AMDGPU_MAX_RINGS];
2710 if (!ring || !ring->sched.thread)
2713 kthread_park(ring->sched.thread);
2718 /* last chance to check whether the job was already removed from the
2719 * mirror list, since we have already paid the cost of kthread_park */
2720 if (job && list_empty(&job->base.node)) {
2721 kthread_unpark(ring->sched.thread);
2725 if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit))
2726 amd_sched_job_kickout(&job->base);
2728 /* only do job_reset on the hung ring if @job is not NULL */
2729 amd_sched_hw_job_reset(&ring->sched);
2731 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2732 amdgpu_fence_driver_force_completion_ring(ring);
2735 /* request to take full control of GPU before re-initialization */
2737 amdgpu_virt_reset_gpu(adev);
2739 amdgpu_virt_request_full_gpu(adev, true);
2742 /* Resume IP prior to SMC */
2743 amdgpu_sriov_reinit_early(adev);
2745 /* we need to recover the GART before resuming SMC/CP/SDMA */
2746 amdgpu_ttm_recover_gart(adev);
2748 /* now we are okay to resume SMC/CP/SDMA */
2749 amdgpu_sriov_reinit_late(adev);
2751 amdgpu_irq_gpu_reset_resume_helper(adev);
2753 if (amdgpu_ib_ring_tests(adev))
2754 dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
2756 /* release full control of GPU after ib test */
2757 amdgpu_virt_release_full_gpu(adev, true);
2759 DRM_INFO("recover vram bo from shadow\n");
2761 ring = adev->mman.buffer_funcs_ring;
2762 mutex_lock(&adev->shadow_list_lock);
2763 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2765 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2767 r = dma_fence_wait(fence, false);
2769 WARN(r, "recovery from shadow isn't completed\n");
2774 dma_fence_put(fence);
2777 mutex_unlock(&adev->shadow_list_lock);
2780 r = dma_fence_wait(fence, false);
2782 WARN(r, "recovery from shadow isn't completed\n");
2784 dma_fence_put(fence);
2786 for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) {
2787 ring = adev->rings[i % AMDGPU_MAX_RINGS];
2788 if (!ring || !ring->sched.thread)
2791 if (job && j != i) {
2792 kthread_unpark(ring->sched.thread);
2796 amd_sched_job_recovery(&ring->sched);
2797 kthread_unpark(ring->sched.thread);
2800 drm_helper_resume_force_mode(adev->ddev);
2802 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2804 /* bad news, how do we tell userspace? */
2805 dev_info(adev->dev, "GPU reset failed\n");
2807 dev_info(adev->dev, "GPU reset successed!\n");
2810 adev->gfx.in_reset = false;
2811 mutex_unlock(&adev->virt.lock_reset);
2816 * amdgpu_gpu_reset - reset the asic
2818 * @adev: amdgpu device pointer
2820 * Attempt to reset the GPU if it has hung (all asics).
2821 * Returns 0 for success or an error on failure.
2823 int amdgpu_gpu_reset(struct amdgpu_device *adev)
2827 bool need_full_reset, vram_lost = false;
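/* overall flow, as implemented below: try a per-IP soft reset first; if
 * that fails, or a hung block type cannot be soft-reset, fall back to a
 * full ASIC reset with a two-phase resume, GART recovery and (on dGPUs)
 * restoring VRAM buffers from their shadows */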
2829 if (!amdgpu_check_soft_reset(adev)) {
2830 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2834 atomic_inc(&adev->gpu_reset_counter);
2837 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2839 /* block scheduler */
2840 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2841 struct amdgpu_ring *ring = adev->rings[i];
2843 if (!ring || !ring->sched.thread)
2845 kthread_park(ring->sched.thread);
2846 amd_sched_hw_job_reset(&ring->sched);
2848 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2849 amdgpu_fence_driver_force_completion(adev);
2851 need_full_reset = amdgpu_need_full_reset(adev);
2853 if (!need_full_reset) {
2854 amdgpu_pre_soft_reset(adev);
2855 r = amdgpu_soft_reset(adev);
2856 amdgpu_post_soft_reset(adev);
2857 if (r || amdgpu_check_soft_reset(adev)) {
2858 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2859 need_full_reset = true;
2863 if (need_full_reset) {
2864 r = amdgpu_suspend(adev);
2867 /* Disable fb access */
2868 if (adev->mode_info.num_crtc) {
2869 struct amdgpu_mode_mc_save save;
2870 amdgpu_display_stop_mc_access(adev, &save);
2871 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2873 if (adev->is_atom_fw)
2874 amdgpu_atomfirmware_scratch_regs_save(adev);
2876 amdgpu_atombios_scratch_regs_save(adev);
2877 r = amdgpu_asic_reset(adev);
2878 if (adev->is_atom_fw)
2879 amdgpu_atomfirmware_scratch_regs_restore(adev);
2881 amdgpu_atombios_scratch_regs_restore(adev);
2883 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2886 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2887 r = amdgpu_resume_phase1(adev);
2890 vram_lost = amdgpu_check_vram_lost(adev);
2892 DRM_ERROR("VRAM is lost!\n");
2893 atomic_inc(&adev->vram_lost_counter);
2895 r = amdgpu_ttm_recover_gart(adev);
2898 r = amdgpu_resume_phase2(adev);
2902 amdgpu_fill_reset_magic(adev);
2907 amdgpu_irq_gpu_reset_resume_helper(adev);
2908 r = amdgpu_ib_ring_tests(adev);
2910 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2911 r = amdgpu_suspend(adev);
2912 need_full_reset = true;
2916 * recover VM page tables, since we cannot rely on VRAM being
2917 * consistent after a full GPU reset.
2919 if (need_full_reset && amdgpu_need_backup(adev)) {
2920 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2921 struct amdgpu_bo *bo, *tmp;
2922 struct dma_fence *fence = NULL, *next = NULL;
2924 DRM_INFO("recover vram bo from shadow\n");
2925 mutex_lock(&adev->shadow_list_lock);
2926 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2928 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2930 r = dma_fence_wait(fence, false);
2932 WARN(r, "recovery from shadow isn't completed\n");
2937 dma_fence_put(fence);
2940 mutex_unlock(&adev->shadow_list_lock);
2942 r = dma_fence_wait(fence, false);
2944 WARN(r, "recovery from shadow isn't completed\n");
2946 dma_fence_put(fence);
2948 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2949 struct amdgpu_ring *ring = adev->rings[i];
2951 if (!ring || !ring->sched.thread)
2954 amd_sched_job_recovery(&ring->sched);
2955 kthread_unpark(ring->sched.thread);
2958 dev_err(adev->dev, "asic resume failed (%d).\n", r);
2959 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2960 if (adev->rings[i] && adev->rings[i]->sched.thread) {
2961 kthread_unpark(adev->rings[i]->sched.thread);
2966 drm_helper_resume_force_mode(adev->ddev);
2968 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2970 /* bad news, how do we tell userspace? */
2971 dev_info(adev->dev, "GPU reset failed\n");
2973 dev_info(adev->dev, "GPU reset succeeded!\n");
2978 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2983 if (amdgpu_pcie_gen_cap)
2984 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2986 if (amdgpu_pcie_lane_cap)
2987 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
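/* precedence: the module parameters above win when set; devices on a root
 * bus (APUs included) fall back to the defaults below, and everything else
 * is probed through the DRM PCIe helpers */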
2989 /* covers APUs as well */
2990 if (pci_is_root_bus(adev->pdev->bus)) {
2991 if (adev->pm.pcie_gen_mask == 0)
2992 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2993 if (adev->pm.pcie_mlw_mask == 0)
2994 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2998 if (adev->pm.pcie_gen_mask == 0) {
2999 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3001 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3002 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3003 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3005 if (mask & DRM_PCIE_SPEED_25)
3006 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3007 if (mask & DRM_PCIE_SPEED_50)
3008 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3009 if (mask & DRM_PCIE_SPEED_80)
3010 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3012 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3015 if (adev->pm.pcie_mlw_mask == 0) {
3016 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3020 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3021 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3022 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3023 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3024 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3025 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3026 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3029 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3030 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3031 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3032 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3033 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3034 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3037 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3038 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3039 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3040 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3041 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3044 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3045 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3046 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3047 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3050 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3051 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3052 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3055 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3056 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3059 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3065 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3073 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
3074 const struct drm_info_list *files,
3079 for (i = 0; i < adev->debugfs_count; i++) {
3080 if (adev->debugfs[i].files == files) {
3081 /* Already registered */
3086 i = adev->debugfs_count + 1;
3087 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
3088 DRM_ERROR("Reached maximum number of debugfs components.\n");
3089 DRM_ERROR("Report so we increase "
3090 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
3093 adev->debugfs[adev->debugfs_count].files = files;
3094 adev->debugfs[adev->debugfs_count].num_files = nfiles;
3095 adev->debugfs_count = i;
3096 #if defined(CONFIG_DEBUG_FS)
3097 drm_debugfs_create_files(files, nfiles,
3098 adev->ddev->primary->debugfs_root,
3099 adev->ddev->primary);
3104 #if defined(CONFIG_DEBUG_FS)
3106 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
3107 size_t size, loff_t *pos)
3109 struct amdgpu_device *adev = file_inode(f)->i_private;
3112 bool pm_pg_lock, use_bank;
3113 unsigned instance_bank, sh_bank, se_bank;
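/*
 * Offset encoding understood by this file (a summary of the decode below,
 * not a formal ABI description):
 *   bits  0..21  register offset in bytes
 *   bit  23      take the PM/PG lock around the access
 *   bits 24..33  SE bank        (0x3FF = broadcast)
 *   bits 34..43  SH bank        (0x3FF = broadcast)
 *   bits 44..53  instance bank  (0x3FF = broadcast)
 *   bit  62      enable SE/SH/instance bank selection
 */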
3115 if (size & 0x3 || *pos & 0x3)
3118 /* are we reading registers for which a PG lock is necessary? */
3119 pm_pg_lock = (*pos >> 23) & 1;
3121 if (*pos & (1ULL << 62)) {
3122 se_bank = (*pos >> 24) & 0x3FF;
3123 sh_bank = (*pos >> 34) & 0x3FF;
3124 instance_bank = (*pos >> 44) & 0x3FF;
3126 if (se_bank == 0x3FF)
3127 se_bank = 0xFFFFFFFF;
3128 if (sh_bank == 0x3FF)
3129 sh_bank = 0xFFFFFFFF;
3130 if (instance_bank == 0x3FF)
3131 instance_bank = 0xFFFFFFFF;
3137 *pos &= (1UL << 22) - 1;
3140 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3141 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3143 mutex_lock(&adev->grbm_idx_mutex);
3144 amdgpu_gfx_select_se_sh(adev, se_bank,
3145 sh_bank, instance_bank);
3149 mutex_lock(&adev->pm.mutex);
3154 if (*pos > adev->rmmio_size)
3157 value = RREG32(*pos >> 2);
3158 r = put_user(value, (uint32_t *)buf);
3172 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3173 mutex_unlock(&adev->grbm_idx_mutex);
3177 mutex_unlock(&adev->pm.mutex);
3182 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
3183 size_t size, loff_t *pos)
3185 struct amdgpu_device *adev = file_inode(f)->i_private;
3188 bool pm_pg_lock, use_bank;
3189 unsigned instance_bank, sh_bank, se_bank;
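/* *pos uses the same bank/PM-lock encoding as amdgpu_debugfs_regs_read()
 * above */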
3191 if (size & 0x3 || *pos & 0x3)
3194 /* are we writing registers for which a PG lock is necessary? */
3195 pm_pg_lock = (*pos >> 23) & 1;
3197 if (*pos & (1ULL << 62)) {
3198 se_bank = (*pos >> 24) & 0x3FF;
3199 sh_bank = (*pos >> 34) & 0x3FF;
3200 instance_bank = (*pos >> 44) & 0x3FF;
3202 if (se_bank == 0x3FF)
3203 se_bank = 0xFFFFFFFF;
3204 if (sh_bank == 0x3FF)
3205 sh_bank = 0xFFFFFFFF;
3206 if (instance_bank == 0x3FF)
3207 instance_bank = 0xFFFFFFFF;
3213 *pos &= (1UL << 22) - 1;
3216 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
3217 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
3219 mutex_lock(&adev->grbm_idx_mutex);
3220 amdgpu_gfx_select_se_sh(adev, se_bank,
3221 sh_bank, instance_bank);
3225 mutex_lock(&adev->pm.mutex);
3230 if (*pos > adev->rmmio_size)
3233 r = get_user(value, (uint32_t *)buf);
3237 WREG32(*pos >> 2, value);
3246 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3247 mutex_unlock(&adev->grbm_idx_mutex);
3251 mutex_unlock(&adev->pm.mutex);
3256 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
3257 size_t size, loff_t *pos)
3259 struct amdgpu_device *adev = file_inode(f)->i_private;
3263 if (size & 0x3 || *pos & 0x3)
3269 value = RREG32_PCIE(*pos >> 2);
3270 r = put_user(value, (uint32_t *)buf);
3283 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
3284 size_t size, loff_t *pos)
3286 struct amdgpu_device *adev = file_inode(f)->i_private;
3290 if (size & 0x3 || *pos & 0x3)
3296 r = get_user(value, (uint32_t *)buf);
3300 WREG32_PCIE(*pos >> 2, value);
3311 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
3312 size_t size, loff_t *pos)
3314 struct amdgpu_device *adev = file_inode(f)->i_private;
3318 if (size & 0x3 || *pos & 0x3)
3324 value = RREG32_DIDT(*pos >> 2);
3325 r = put_user(value, (uint32_t *)buf);
3338 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
3339 size_t size, loff_t *pos)
3341 struct amdgpu_device *adev = file_inode(f)->i_private;
3345 if (size & 0x3 || *pos & 0x3)
3351 r = get_user(value, (uint32_t *)buf);
3355 WREG32_DIDT(*pos >> 2, value);
3366 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
3367 size_t size, loff_t *pos)
3369 struct amdgpu_device *adev = file_inode(f)->i_private;
3373 if (size & 0x3 || *pos & 0x3)
3379 value = RREG32_SMC(*pos);
3380 r = put_user(value, (uint32_t *)buf);
3393 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
3394 size_t size, loff_t *pos)
3396 struct amdgpu_device *adev = file_inode(f)->i_private;
3400 if (size & 0x3 || *pos & 0x3)
3406 r = get_user(value, (uint32_t *)buf);
3410 WREG32_SMC(*pos, value);
3421 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
3422 size_t size, loff_t *pos)
3424 struct amdgpu_device *adev = file_inode(f)->i_private;
3427 uint32_t *config, no_regs = 0;
3429 if (size & 0x3 || *pos & 0x3)
3432 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
3436 /* version, increment each time something is added */
3437 config[no_regs++] = 3;
3438 config[no_regs++] = adev->gfx.config.max_shader_engines;
3439 config[no_regs++] = adev->gfx.config.max_tile_pipes;
3440 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
3441 config[no_regs++] = adev->gfx.config.max_sh_per_se;
3442 config[no_regs++] = adev->gfx.config.max_backends_per_se;
3443 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
3444 config[no_regs++] = adev->gfx.config.max_gprs;
3445 config[no_regs++] = adev->gfx.config.max_gs_threads;
3446 config[no_regs++] = adev->gfx.config.max_hw_contexts;
3447 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
3448 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
3449 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
3450 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
3451 config[no_regs++] = adev->gfx.config.num_tile_pipes;
3452 config[no_regs++] = adev->gfx.config.backend_enable_mask;
3453 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
3454 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
3455 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
3456 config[no_regs++] = adev->gfx.config.num_gpus;
3457 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
3458 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
3459 config[no_regs++] = adev->gfx.config.gb_addr_config;
3460 config[no_regs++] = adev->gfx.config.num_rbs;
3463 config[no_regs++] = adev->rev_id;
3464 config[no_regs++] = adev->pg_flags;
3465 config[no_regs++] = adev->cg_flags;
3468 config[no_regs++] = adev->family;
3469 config[no_regs++] = adev->external_rev_id;
3472 config[no_regs++] = adev->pdev->device;
3473 config[no_regs++] = adev->pdev->revision;
3474 config[no_regs++] = adev->pdev->subsystem_device;
3475 config[no_regs++] = adev->pdev->subsystem_vendor;
3477 while (size && (*pos < no_regs * 4)) {
3480 value = config[*pos >> 2];
3481 r = put_user(value, (uint32_t *)buf);
3497 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
3498 size_t size, loff_t *pos)
3500 struct amdgpu_device *adev = file_inode(f)->i_private;
3501 int idx, x, outsize, r, valuesize;
3502 uint32_t values[16];
3504 if (size & 3 || *pos & 0x3)
3507 if (amdgpu_dpm == 0)
3510 /* convert offset to sensor number */
3513 valuesize = sizeof(values);
3514 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
3515 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
3516 else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
3517 r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
3522 if (size > valuesize)
3529 r = put_user(values[x++], (int32_t *)buf);
3536 return !r ? outsize : r;
3539 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
3540 size_t size, loff_t *pos)
3542 struct amdgpu_device *adev = f->f_inode->i_private;
3545 uint32_t offset, se, sh, cu, wave, simd, data[32];
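/* the wave to dump is selected through *pos: the low bits hold the byte
 * offset into the wave data, the higher bits encode the SE, SH, CU, wave
 * and SIMD ids exactly as decoded below */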
3547 if (size & 3 || *pos & 3)
3551 offset = (*pos & 0x7F);
3552 se = ((*pos >> 7) & 0xFF);
3553 sh = ((*pos >> 15) & 0xFF);
3554 cu = ((*pos >> 23) & 0xFF);
3555 wave = ((*pos >> 31) & 0xFF);
3556 simd = ((*pos >> 37) & 0xFF);
3558 /* switch to the specific se/sh/cu */
3559 mutex_lock(&adev->grbm_idx_mutex);
3560 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3563 if (adev->gfx.funcs->read_wave_data)
3564 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
3566 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3567 mutex_unlock(&adev->grbm_idx_mutex);
3572 while (size && (offset < x * 4)) {
3575 value = data[offset >> 2];
3576 r = put_user(value, (uint32_t *)buf);
3589 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
3590 size_t size, loff_t *pos)
3592 struct amdgpu_device *adev = f->f_inode->i_private;
3595 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
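/* *pos encodes a dword offset plus the SE/SH/CU/wave/SIMD/thread ids as
 * decoded below; the bank bit (bit 60) selects whether SGPRs or VGPRs are
 * dumped */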
3597 if (size & 3 || *pos & 3)
3601 offset = (*pos & 0xFFF); /* in dwords */
3602 se = ((*pos >> 12) & 0xFF);
3603 sh = ((*pos >> 20) & 0xFF);
3604 cu = ((*pos >> 28) & 0xFF);
3605 wave = ((*pos >> 36) & 0xFF);
3606 simd = ((*pos >> 44) & 0xFF);
3607 thread = ((*pos >> 52) & 0xFF);
3608 bank = ((*pos >> 60) & 1);
3610 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
3614 /* switch to the specific se/sh/cu */
3615 mutex_lock(&adev->grbm_idx_mutex);
3616 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
3619 if (adev->gfx.funcs->read_wave_vgprs)
3620 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
3622 if (adev->gfx.funcs->read_wave_sgprs)
3623 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
3626 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
3627 mutex_unlock(&adev->grbm_idx_mutex);
3632 value = data[offset++];
3633 r = put_user(value, (uint32_t *)buf);
3649 static const struct file_operations amdgpu_debugfs_regs_fops = {
3650 .owner = THIS_MODULE,
3651 .read = amdgpu_debugfs_regs_read,
3652 .write = amdgpu_debugfs_regs_write,
3653 .llseek = default_llseek
3655 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
3656 .owner = THIS_MODULE,
3657 .read = amdgpu_debugfs_regs_didt_read,
3658 .write = amdgpu_debugfs_regs_didt_write,
3659 .llseek = default_llseek
3661 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
3662 .owner = THIS_MODULE,
3663 .read = amdgpu_debugfs_regs_pcie_read,
3664 .write = amdgpu_debugfs_regs_pcie_write,
3665 .llseek = default_llseek
3667 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
3668 .owner = THIS_MODULE,
3669 .read = amdgpu_debugfs_regs_smc_read,
3670 .write = amdgpu_debugfs_regs_smc_write,
3671 .llseek = default_llseek
3674 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
3675 .owner = THIS_MODULE,
3676 .read = amdgpu_debugfs_gca_config_read,
3677 .llseek = default_llseek
3680 static const struct file_operations amdgpu_debugfs_sensors_fops = {
3681 .owner = THIS_MODULE,
3682 .read = amdgpu_debugfs_sensor_read,
3683 .llseek = default_llseek
3686 static const struct file_operations amdgpu_debugfs_wave_fops = {
3687 .owner = THIS_MODULE,
3688 .read = amdgpu_debugfs_wave_read,
3689 .llseek = default_llseek
3691 static const struct file_operations amdgpu_debugfs_gpr_fops = {
3692 .owner = THIS_MODULE,
3693 .read = amdgpu_debugfs_gpr_read,
3694 .llseek = default_llseek
3697 static const struct file_operations *debugfs_regs[] = {
3698 &amdgpu_debugfs_regs_fops,
3699 &amdgpu_debugfs_regs_didt_fops,
3700 &amdgpu_debugfs_regs_pcie_fops,
3701 &amdgpu_debugfs_regs_smc_fops,
3702 &amdgpu_debugfs_gca_config_fops,
3703 &amdgpu_debugfs_sensors_fops,
3704 &amdgpu_debugfs_wave_fops,
3705 &amdgpu_debugfs_gpr_fops,
3708 static const char *debugfs_regs_names[] = {
3713 "amdgpu_gca_config",
3719 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3721 struct drm_minor *minor = adev->ddev->primary;
3722 struct dentry *ent, *root = minor->debugfs_root;
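/* create one file per entry under the primary DRM minor's debugfs root;
 * the register file's size is set to the MMIO aperture size so the
 * readable range is visible to tools */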
3725 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3726 ent = debugfs_create_file(debugfs_regs_names[i],
3727 S_IFREG | S_IRUGO, root,
3728 adev, debugfs_regs[i]);
3730 for (j = 0; j < i; j++) {
3731 debugfs_remove(adev->debugfs_regs[j]);
3732 adev->debugfs_regs[j] = NULL;
3734 return PTR_ERR(ent);
3738 i_size_write(ent->d_inode, adev->rmmio_size);
3739 adev->debugfs_regs[i] = ent;
3745 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
3749 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
3750 if (adev->debugfs_regs[i]) {
3751 debugfs_remove(adev->debugfs_regs[i]);
3752 adev->debugfs_regs[i] = NULL;
3757 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
3759 struct drm_info_node *node = (struct drm_info_node *) m->private;
3760 struct drm_device *dev = node->minor->dev;
3761 struct amdgpu_device *adev = dev->dev_private;
3764 /* park the scheduler threads so no new jobs run while the IB tests execute */
3765 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3766 struct amdgpu_ring *ring = adev->rings[i];
3768 if (!ring || !ring->sched.thread)
3770 kthread_park(ring->sched.thread);
3773 seq_printf(m, "run ib test:\n");
3774 r = amdgpu_ib_ring_tests(adev);
3776 seq_printf(m, "ib ring tests failed (%d).\n", r);
3778 seq_printf(m, "ib ring tests passed.\n");
3780 /* restart the scheduler threads */
3781 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3782 struct amdgpu_ring *ring = adev->rings[i];
3784 if (!ring || !ring->sched.thread)
3786 kthread_unpark(ring->sched.thread);
3792 static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
3793 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
3796 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
3798 return amdgpu_debugfs_add_files(adev,
3799 amdgpu_debugfs_test_ib_ring_list, 1);
3802 int amdgpu_debugfs_init(struct drm_minor *minor)
3807 static int amdgpu_debugfs_test_ib_init(struct amdgpu_device *adev)
3811 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
3815 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }