 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

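/* Report the MC address range of the requested memory pool and how much
 * of it is not currently pinned. */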
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

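/* Back a kernel virtual buffer with a GTT buffer object and pin it into the
 * requested MC address window; returns the BO handle and its MC address. */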
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

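/* Allocate a buffer object of the requested type, restricted to the
 * [min_offset, max_offset] MC range via a single TTM placement. */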
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

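/* Import a dma-buf fd as an amdgpu buffer object: look up the drm_file of
 * the calling process and go through the PRIME fd-to-handle path. */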
static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
				     cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	int r;
	uint32_t dma_handle;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct drm_device *dev = adev->ddev;
	struct drm_file *file_priv = NULL, *priv;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(priv, &dev->filelist, lhead) {
		rcu_read_lock();
		if (priv->pid == get_pid(task_pid(current)))
			file_priv = priv;
		rcu_read_unlock();
		if (file_priv)
			break;
	}
	mutex_unlock(&dev->struct_mutex);
	r = dev->driver->prime_fd_to_handle(dev,
					    file_priv, dmabuf_fd,
					    &dma_handle);
	if (r)
		return r;

	spin_lock(&file_priv->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&file_priv->object_idr, dma_handle);
	if (obj == NULL) {
		spin_unlock(&file_priv->table_lock);
		return -EINVAL;
	}
	spin_unlock(&file_priv->table_lock);
	bo = gem_to_amdgpu_bo(obj);
	*handle = (cgs_handle_t)bo;
	return 0;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

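/* Pin a previously allocated buffer object within the range described by its
 * single placement and return the resulting MC address. */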
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

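/* MMIO register accessors, forwarded to the amdgpu RREG32/WREG32 helpers;
 * the indirect variants below dispatch on the register space. */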
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

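/* PCI configuration space accessors; failures are reported with WARN and
 * the read variants then return 0. */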
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

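/* Power-management request hooks; not implemented yet, they simply report
 * success. */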
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}

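/* Glue that routes amdgpu interrupt callbacks back to the CGS client's
 * set/handler functions for a given source id. */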
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

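/* Register a new interrupt source with the amdgpu IRQ framework on behalf of
 * a CGS client. */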
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

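/* Dispatch tables handed to the OS-independent CGS code; the entries are
 * positional, so their order must match the cgs_ops/cgs_os_ops declarations. */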
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_import_gpu_mem,
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

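/* Create/destroy the CGS wrapper object that ties an amdgpu device to the
 * dispatch tables above. */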
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}