/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"

static const unsigned int compute_vmid_bitmap = 0xFF00;
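
/* Note: the mask above reserves the upper eight VMIDs (8..15, i.e. 0xFF00)
 * for KFD compute work, while amdgpu keeps VMIDs 0..7 for graphics. See
 * amdgpu_amdkfd_is_kfd_vmid() below, which tests a VMID against this mask.
 */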

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	kgd2kfd_exit();
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, kfd2kgd);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
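
/* Worked example (hypothetical numbers): with a 2 MiB doorbell BAR and
 * num_doorbells = 1024, amdgpu keeps the first 1024 * sizeof(u32) = 4096
 * bytes, so amdkfd is reported start_offset = 4 KiB and owns everything
 * from there up to the 2 MiB aperture limit.
 */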

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.sched.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);
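
		/* Worked example (typical gfx9 configuration, for
		 * illustration): with 4 pipes per MEC and 8 queues per pipe,
		 * last_valid_bit = 1 * 4 * 8 = 32, so only bits 0..31
		 * (MEC0's queues) can remain set for KFD's use.
		 */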

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address have to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}
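
		/* For illustration (the index range here is hypothetical):
		 * if the non-CP window spans doorbell indices 0x180..0x1FF,
		 * any doorbell whose low 12 address bits land in that window
		 * is routed by BIF to SDMA/VCN/IH rather than CP, so KFD must
		 * place its CP queue doorbells outside
		 * [first_non_cp, last_non_cp].
		 */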

		kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}
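
/* Usage sketch (illustrative only; the "mqd" names are hypothetical): a
 * caller needing a kernel-mapped, GPU-visible GTT buffer would do:
 *
 *	void *mqd_obj, *mqd_cpu;
 *	uint64_t mqd_gpu;
 *	int err = amdgpu_amdkfd_alloc_gtt_mem(kgd, PAGE_SIZE, &mqd_obj,
 *					      &mqd_gpu, &mqd_cpu, false);
 *	if (!err)
 *		memset(mqd_cpu, 0, PAGE_SIZE); // CPU writes; GPU reads at mqd_gpu
 *
 * and release it with amdgpu_amdkfd_free_gtt_mem(kgd, mqd_obj).
 */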

void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
				void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}
}

void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is reported in quanta of 10 kHz */
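	/* (e.g. a reported sclk of 130000 * 10 kHz is 1300 MHz, hence the
	 * division by 100 below to convert to MHz)
	 */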
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}

	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}
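
/* Usage sketch (illustrative; the buffer names are hypothetical): a caller
 * holding an IB that is already GPU-resident at ib_gpu_va submits it
 * synchronously like so:
 *
 *	uint32_t packets[8] = { ... };	// engine-specific packets, 8 dwords
 *	int err = amdgpu_amdkfd_submit_ib(kgd, KGD_ENGINE_MEC1, vmid,
 *					  ib_gpu_va, packets, 8);
 *
 * The call blocks in dma_fence_wait() until the ring has executed the IB,
 * so it is only suitable for slow paths (per the NO_HWS note above).
 */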

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      const struct kfd2kgd_calls *f2g)
{
	return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
#endif