/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA			0x39d5e86b

#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);
#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);
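
/*
 * Usage sketch (illustrative only, not part of the driver): checking for a
 * minimum IP block version.  This assumes the cmp helper's convention of
 * returning 0 when a block of the given type with version >= major.minor
 * is present.
 */
static inline bool amdgpu_example_has_gfx8(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
						  8, 0) == 0;
}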
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);

	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	bool (*prescreen_iv)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t max_pixel_clock;
};
#define AMDGPU_GEM_DOMAIN_MAX		0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size.  If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects are on the same
 * alignment).
 */
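
/*
 * Illustrative sketch (not part of the driver): the "room at the end"
 * test described above, written out with plain parameters.
 */
static inline bool amdgpu_example_sa_fits_at_end(uint64_t total_size,
						 uint64_t last_offset,
						 uint64_t last_size,
						 uint64_t alloc_size)
{
	/* free bytes between the last sub-allocation and the buffer end */
	return total_size - (last_offset + last_size) >= alloc_size;
}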
#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	struct dma_fence		*fence;
};
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ			= 0x000,
	AMDGPU_DOORBELL_HIQ			= 0x001,
	AMDGPU_DOORBELL_DIQ			= 0x002,
	AMDGPU_DOORBELL_MEC_RING0		= 0x010,
	AMDGPU_DOORBELL_MEC_RING1		= 0x011,
	AMDGPU_DOORBELL_MEC_RING2		= 0x012,
	AMDGPU_DOORBELL_MEC_RING3		= 0x013,
	AMDGPU_DOORBELL_MEC_RING4		= 0x014,
	AMDGPU_DOORBELL_MEC_RING5		= 0x015,
	AMDGPU_DOORBELL_MEC_RING6		= 0x016,
	AMDGPU_DOORBELL_MEC_RING7		= 0x017,
	AMDGPU_DOORBELL_GFX_RING0		= 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0		= 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1		= 0x1E1,
	AMDGPU_DOORBELL_IH			= 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT		= 0x3FF,
	AMDGPU_DOORBELL_INVALID			= 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;
struct amdgpu_doorbell {
	resource_size_t	base;
	resource_size_t	size;
	u32		num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};
/*
 * 64-bit doorbells; offsets are in QWORD units and occupy 2KB of doorbell space
 */
typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
{
	/*
	 * All compute-related doorbells - kiq, hiq, diq, traditional compute
	 * queues and user queues - must sit in a contiguous range so that
	 * programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the whole
	 * range.  Compute-related doorbells are allocated from 0x00 to 0x8a.
	 */

	/* kernel scheduling */
	AMDGPU_DOORBELL64_KIQ			= 0x00,

	/* HSA interface queue and debug queue */
	AMDGPU_DOORBELL64_HIQ			= 0x01,
	AMDGPU_DOORBELL64_DIQ			= 0x02,

	/* Compute engines */
	AMDGPU_DOORBELL64_MEC_RING0		= 0x03,
	AMDGPU_DOORBELL64_MEC_RING1		= 0x04,
	AMDGPU_DOORBELL64_MEC_RING2		= 0x05,
	AMDGPU_DOORBELL64_MEC_RING3		= 0x06,
	AMDGPU_DOORBELL64_MEC_RING4		= 0x07,
	AMDGPU_DOORBELL64_MEC_RING5		= 0x08,
	AMDGPU_DOORBELL64_MEC_RING6		= 0x09,
	AMDGPU_DOORBELL64_MEC_RING7		= 0x0a,

	/* User queue doorbell range (128 doorbells) */
	AMDGPU_DOORBELL64_USERQUEUE_START	= 0x0b,
	AMDGPU_DOORBELL64_USERQUEUE_END		= 0x8a,

	/* Graphics engine */
	AMDGPU_DOORBELL64_GFX_RING0		= 0x8b,

	/*
	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
	 * Graphics voltage island aperture 1.
	 * The default non-graphics QWORD index range is 0xF0 - 0xFF inclusive.
	 */

	AMDGPU_DOORBELL64_sDMA_ENGINE0		= 0xF0,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0	= 0xF1,
	AMDGPU_DOORBELL64_sDMA_ENGINE1		= 0xF2,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1	= 0xF3,

	/* Interrupt handler */
	AMDGPU_DOORBELL64_IH			= 0xF4,	/* For legacy interrupt ring buffer */
	AMDGPU_DOORBELL64_IH_RING1		= 0xF5,	/* For page migration request log */
	AMDGPU_DOORBELL64_IH_RING2		= 0xF6,	/* For page migration translation/invalidation log */

	/* The VCN engine uses 32-bit doorbells */
	AMDGPU_DOORBELL64_VCN0_1		= 0xF8,	/* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
	AMDGPU_DOORBELL64_VCN2_3		= 0xF9,
	AMDGPU_DOORBELL64_VCN4_5		= 0xFA,
	AMDGPU_DOORBELL64_VCN6_7		= 0xFB,

	/* Overlap the doorbell assignment with VCN as they are mutually exclusive.
	 * The VCE engine's doorbells are 32-bit and two VCE rings share one QWORD.
	 */
	AMDGPU_DOORBELL64_UVD_RING0_1		= 0xF8,
	AMDGPU_DOORBELL64_UVD_RING2_3		= 0xF9,
	AMDGPU_DOORBELL64_UVD_RING4_5		= 0xFA,
	AMDGPU_DOORBELL64_UVD_RING6_7		= 0xFB,

	AMDGPU_DOORBELL64_VCE_RING0_1		= 0xFC,
	AMDGPU_DOORBELL64_VCE_RING2_3		= 0xFD,
	AMDGPU_DOORBELL64_VCE_RING4_5		= 0xFE,
	AMDGPU_DOORBELL64_VCE_RING6_7		= 0xFF,

	AMDGPU_DOORBELL64_MAX_ASSIGNMENT	= 0xFF,
	AMDGPU_DOORBELL64_INVALID		= 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
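
/*
 * Illustrative helper (not in the driver): the 64-bit doorbell indices
 * above are QWORD indices, so the byte offset into the doorbell BAR is
 * simply index * sizeof(u64).
 */
static inline u32 amdgpu_example_doorbell64_byte_offset(u32 qword_index)
{
	return qword_index * sizeof(u64);	/* e.g. 0xF0 -> 0x780 */
}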
struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	struct dma_fence		*excl;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
};


/*
 * CS.
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct drm_sched_entity *entity, void *owner,
		      struct dma_fence **f);
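
/*
 * Usage sketch (illustrative only): the usual job lifecycle is
 * alloc_with_ib -> fill job->ibs[0] -> submit; a job that fails to
 * submit must be freed explicitly.
 */
static inline int amdgpu_example_submit_job(struct amdgpu_device *adev,
					    struct amdgpu_ring *ring,
					    struct drm_sched_entity *entity,
					    struct dma_fence **f)
{
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		return r;
	/* ... emit packets into job->ibs[0] here ... */
	r = amdgpu_job_submit(job, ring, entity, NULL, f);
	if (r)
		amdgpu_job_free(job);
	return r;
}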
struct amdgpu_queue_mapper {
	struct mutex	lock;
	/* protected by lock */
	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
};

struct amdgpu_queue_mgr {
	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
};

int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring);
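
/*
 * Usage sketch (illustrative): resolving a userspace (hw_ip, instance,
 * ring) triple to a kernel ring, as the CS path does.
 */
static inline int amdgpu_example_lookup_gfx_ring0(struct amdgpu_device *adev,
						  struct amdgpu_queue_mgr *mgr,
						  struct amdgpu_ring **out_ring)
{
	return amdgpu_queue_mgr_map(adev, mgr, AMDGPU_HW_IP_GFX, 0, 0,
				    out_ring);
}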
/*
 * context related structures
 */
struct amdgpu_ctx_ring {
	struct dma_fence	**fences;
	struct drm_sched_entity	entity;
};

struct amdgpu_ctx {
	struct kref		refcount;
	struct amdgpu_device	*adev;
	struct amdgpu_queue_mgr queue_mgr;
	unsigned		reset_counter;
	unsigned		reset_counter_query;
	uint32_t		vram_lost_counter;
	spinlock_t		ring_lock;
	struct dma_fence	**fences;
	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
	bool			preamble_presented;
	enum drm_sched_priority init_priority;
	enum drm_sched_priority override_priority;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
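
/*
 * Usage sketch (illustrative): context lookup is reference counted, so
 * every successful amdgpu_ctx_get() must be balanced by amdgpu_ctx_put().
 */
static inline bool amdgpu_example_ctx_exists(struct amdgpu_fpriv *fpriv,
					     uint32_t id)
{
	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, id);

	if (!ctx)
		return false;
	amdgpu_ctx_put(ctx);
	return true;
}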
/*
 * file private structure
 */
struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	struct page			**user_pages;
	int				user_invalidated;
};

struct amdgpu_bo_list {
	struct rcu_head rhead;
	struct kref refcount;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
739 #include "clearstate_defs.h"
741 struct amdgpu_rlc_funcs {
742 void (*enter_safe_mode)(struct amdgpu_device *adev);
743 void (*exit_safe_mode)(struct amdgpu_device *adev);
747 /* for power gating */
748 struct amdgpu_bo *save_restore_obj;
749 uint64_t save_restore_gpu_addr;
750 volatile uint32_t *sr_ptr;
753 /* for clear state */
754 struct amdgpu_bo *clear_state_obj;
755 uint64_t clear_state_gpu_addr;
756 volatile uint32_t *cs_ptr;
757 const struct cs_section_def *cs_data;
758 u32 clear_state_size;
760 struct amdgpu_bo *cp_table_obj;
761 uint64_t cp_table_gpu_addr;
762 volatile uint32_t *cp_table_ptr;
765 /* safe mode for updating CG/PG state */
767 const struct amdgpu_rlc_funcs *funcs;
769 /* for firmware data */
770 u32 save_and_restore_offset;
771 u32 clear_state_descriptor_offset;
772 u32 avail_scratch_ram_locations;
773 u32 reg_restore_list_size;
774 u32 reg_list_format_start;
775 u32 reg_list_format_separate_start;
776 u32 starting_offsets_start;
777 u32 reg_list_format_size_bytes;
778 u32 reg_list_size_bytes;
779 u32 reg_list_format_direct_reg_list_length;
780 u32 save_restore_list_cntl_size_bytes;
781 u32 save_restore_list_gpm_size_bytes;
782 u32 save_restore_list_srm_size_bytes;
784 u32 *register_list_format;
785 u32 *register_restore;
786 u8 *save_restore_list_cntl;
787 u8 *save_restore_list_gpm;
788 u8 *save_restore_list_srm;
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	struct amdgpu_bo	*mec_fw_obj;
	u32 num_pipe_per_mec;
	u32 num_queue_per_pipe;
	void			*mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];

	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

struct amdgpu_kiq {
	struct amdgpu_bo	*eop_obj;
	spinlock_t		ring_lock;
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
};
/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t		reg_base;
	uint32_t		free_mask;
};

#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;
	uint8_t num_rb_per_se;
};
struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configuration features */
	uint32_t double_offchip_lds_buf;
	/* cached value of DB_DEBUG2 */
	uint32_t db_debug2;
};

struct amdgpu_cu_info {
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;

	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_bitmap[4][4];
	uint32_t bitmap[4][4];
};
struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
	void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};
struct amdgpu_ngg_buf {
	struct amdgpu_bo	*bo;
};

enum {
	NGG_PRIM = 0,
	NGG_POS,
	NGG_CNTL,
	NGG_PARAM,
	NGG_BUF_MAX
};

struct amdgpu_ngg {
	struct amdgpu_ngg_buf	buf[NGG_BUF_MAX];
	uint32_t		gds_reserve_addr;
	uint32_t		gds_reserve_size;
};

struct sq_work {
	struct work_struct	work;
};
struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gfx_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_kiq		kiq;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			rlc_srlc_fw_version;
	uint32_t			rlc_srlc_feature_version;
	uint32_t			rlc_srlg_fw_version;
	uint32_t			rlc_srlg_feature_version;
	uint32_t			rlc_srls_fw_version;
	uint32_t			rlc_srls_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	struct amdgpu_irq_src		cp_ecc_error_irq;
	struct amdgpu_irq_src		sq_irq;
	struct sq_work			sq_work;

	uint32_t			gfx_current_status;

	unsigned			ce_ram_size;
	struct amdgpu_cu_info		cu_info;
	const struct amdgpu_gfx_funcs	*funcs;

	uint32_t			grbm_soft_reset;
	uint32_t			srbm_soft_reset;

	struct amdgpu_ngg		ngg;

	/* pipe reservation */
	struct mutex			pipe_reserve_mutex;
	DECLARE_BITMAP			(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
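
/*
 * Usage sketch (illustrative): allocate a small IB from the pool and
 * release it again; real users hand the IB to amdgpu_ib_schedule() or
 * submit it through the job machinery above.
 */
static inline int amdgpu_example_ib_roundtrip(struct amdgpu_device *adev)
{
	struct amdgpu_ib ib;
	int r;

	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		return r;
	amdgpu_ib_free(adev, &ib, NULL);
	return 0;
}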
struct amdgpu_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	void		*kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_mn		*mn;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct dma_fence		*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved_vis_threshold;
	uint64_t			bytes_moved;
	uint64_t			bytes_moved_vis;
	struct amdgpu_bo_list_entry	*evictable;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;

	unsigned			num_post_dep_syncobjs;
	struct drm_syncobj		**post_dep_syncobjs;
};
#define AMDGPU_PREAMBLE_IB_PRESENT	(1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means the preamble IB is presented first in its context */
#define AMDGPU_HAVE_CTX_SWITCH		(1 << 2) /* bit set means a context switch occurred */

struct amdgpu_job {
	struct drm_sched_job	base;
	struct amdgpu_device	*adev;
	struct amdgpu_vm	*vm;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
	struct dma_fence	*fence; /* the hw fence */
	uint32_t		preamble_status;
	uint64_t		fence_ctx; /* the fence_context this job uses */
	bool			vm_needs_flush;
	uint64_t		vm_pd_addr;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
	uint32_t		vram_lost_counter;

	/* user fence handling */
	uint64_t		uf_sequence;
};
#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
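
/*
 * Usage sketch (illustrative): reserve a writeback slot, then release it.
 * The returned index addresses a CPU-visible dword in the wb array that
 * the GPU can write to (e.g. for ring rptr/wptr shadows and fences).
 */
static inline int amdgpu_example_wb_roundtrip(struct amdgpu_device *adev)
{
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;
	amdgpu_device_wb_free(adev, wb);
	return 0;
}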
struct amdgpu_sdma_instance {
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src	trap_irq_1;
#endif
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	uint32_t		srbm_soft_reset;
};
enum amdgpu_firmware_load_type {
	AMDGPU_FW_LOAD_DIRECT = 0,
	AMDGPU_FW_LOAD_SMU,
	AMDGPU_FW_LOAD_PSP,
};

struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	enum amdgpu_firmware_load_type load_type;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
	unsigned int max_ucodes;
	/* firmware is loaded by psp instead of smu from vega10 onwards */
	const struct amdgpu_psp_funcs *funcs;
	struct amdgpu_bo *rbuf;

	/* gpu info firmware data pointer */
	const struct firmware *gpu_info_fw;
};
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
void amdgpu_test_moves(struct amdgpu_device *adev);
/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	uint64_t priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};
/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};
/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
};
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
	u64 start_offset;
	u64 size;
	struct amdgpu_bo *reserved_bo;
	void *va;
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
/*
 * amdgpu nbio functions
 */
struct nbio_hdp_flush_reg {
	u32 ref_and_mask_cp0;
	u32 ref_and_mask_cp1;
	u32 ref_and_mask_cp2;
	u32 ref_and_mask_cp3;
	u32 ref_and_mask_cp4;
	u32 ref_and_mask_cp5;
	u32 ref_and_mask_cp6;
	u32 ref_and_mask_cp7;
	u32 ref_and_mask_cp8;
	u32 ref_and_mask_cp9;
	u32 ref_and_mask_sdma0;
	u32 ref_and_mask_sdma1;
};
struct amdgpu_nbio_funcs {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
	u32 (*get_rev_id)(struct amdgpu_device *adev);
	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	u32 (*get_memsize)(struct amdgpu_device *adev);
	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index);
	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
					 bool enable);
	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
						  bool enable);
	void (*ih_doorbell_range)(struct amdgpu_device *adev,
				  bool use_doorbell, int doorbell_index);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
						bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*detect_hw_virt)(struct amdgpu_device *adev);
};
struct amdgpu_df_funcs {
	void (*init)(struct amdgpu_device *adev);
	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
				      bool enable);
	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
					    bool enable);
};
/* Define the HW IP blocks used by the driver; add more if necessary */
enum amd_hw_ip_block_type {
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	MAX_HWIP
};
#define HWIP_MAX_INSTANCE 6

struct amd_powerplay {
	const struct amd_pm_funcs *pp_funcs;
	uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	enum amd_asic_type		asic_type;
	uint32_t			external_rev_id;
	unsigned long			flags;
	const struct amdgpu_asic_funcs	*asic_funcs;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif		*atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	struct amdgpu_bo		*stolen_vga_memory;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t			se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	s64				accum_us; /* accumulated microseconds */
	s64				accum_us_vis; /* for visible VRAM */

	bool				enable_virtual_display;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	struct amdgpu_sa_manager	ring_tmp_bo;

	struct amdgpu_irq		irq;

	struct amd_powerplay		powerplay;
	bool				pp_force_state_enabled;

	struct amdgpu_pm		pm;

	struct amdgpu_smumgr		smu;

	struct amdgpu_gfx		gfx;

	struct amdgpu_sdma		sdma;

	struct amdgpu_uvd		uvd;

	struct amdgpu_vce		vce;

	struct amdgpu_vcn		vcn;

	struct amdgpu_firmware		firmware;

	struct psp_context		psp;

	struct amdgpu_gds		gds;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];

	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64				invisible_pin_size;

	/* amdkfd interface */
	struct kfd_dev			*kfd;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	const struct amdgpu_nbio_funcs	*nbio_funcs;
	const struct amdgpu_df_funcs	*df_funcs;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		late_init_work;

	struct amdgpu_virt		virt;
	/* firmware VRAM reservation */
	struct amdgpu_fw_vram_usage	fw_vram_usage;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;
	/* keep an LRU list of rings by HW IP */
	struct list_head		ring_lru_list;
	spinlock_t			ring_lru_list_lock;

	/* record whether a hw reset was performed */
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* record last mm index being written through WREG32 */
	unsigned long			last_mm_index;

	struct mutex			lock_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);
/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX		(1<<0)
#define AMDGPU_REGS_NO_KIQ	(1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
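
/*
 * Usage sketch (illustrative; mmFOO is a placeholder register name):
 * WREG32_OR() performs a read-modify-write that sets bits without
 * disturbing the others.
 */
#if 0
	WREG32_OR(mmFOO, 0x1);	/* expands to WREG32_P(mmFOO, 0x1, ~0x1) */
#endif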
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
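
/*
 * Usage sketch (illustrative): the field helpers rely on the
 * <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming convention from the
 * generated register headers.  FOO/BAR below are placeholder names.
 */
#if 0
#define FOO__BAR__SHIFT	4
#define FOO__BAR_MASK	0x000000f0
	u32 v = REG_SET_FIELD(0, FOO, BAR, 5);	/* v == 0x50 */
	u32 b = REG_GET_FIELD(v, FOO, BAR);	/* b == 5 */
#endif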
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
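
/*
 * Worked example (illustrative): the RBIOS helpers assemble little-endian
 * values byte by byte, so bytes 0x34 and 0x12 at offsets i and i+1 yield
 * RBIOS16(i) == 0x1234; RBIOS32() composes two such words the same way.
 */
#if 0
	/* assuming adev->bios[i] == 0x34 and adev->bios[i+1] == 0x12 */
	BUG_ON(RBIOS16(i) != 0x1234);
#endif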
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	if (i < AMDGPU_MAX_SDMA_INSTANCES)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
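
/*
 * Usage sketch (illustrative): the amdgpu_asic_* wrappers above simply
 * dispatch through adev->asic_funcs, e.g. during recovery:
 */
#if 0
	if (amdgpu_asic_need_full_reset(adev))
		r = amdgpu_asic_reset(adev);
#endif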
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_display_update_priority(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif
/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);
/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;
	int n_32khz, cts_32khz;
	int n_44_1khz, cts_44_1khz;
	int n_48khz, cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **mapping);
#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

#include "amdgpu_object.h"
#endif /* __AMDGPU_H__ */