/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include "gt/intel_engine_types.h"

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_slpc_abi.h"
#include "abi/guc_errors_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_communication_ctb_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_messages_abi.h"

/* Payload length only i.e. don't include G2H header length */
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET       2
#define G2H_LEN_DW_DEREGISTER_CONTEXT           1

#define GUC_CONTEXT_DISABLE             0
#define GUC_CONTEXT_ENABLE              1

#define GUC_CLIENT_PRIORITY_KMD_HIGH    0
#define GUC_CLIENT_PRIORITY_HIGH        1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL  2
#define GUC_CLIENT_PRIORITY_NORMAL      3
#define GUC_CLIENT_PRIORITY_NUM         4

#define GUC_MAX_CONTEXT_ID              65535
#define GUC_INVALID_CONTEXT_ID          GUC_MAX_CONTEXT_ID

#define GUC_RENDER_CLASS                0
#define GUC_VIDEO_CLASS                 1
#define GUC_VIDEOENHANCE_CLASS          2
#define GUC_BLITTER_CLASS               3
#define GUC_COMPUTE_CLASS               4
#define GUC_GSC_OTHER_CLASS             5
#define GUC_LAST_ENGINE_CLASS           GUC_GSC_OTHER_CLASS
#define GUC_MAX_ENGINE_CLASSES          16
#define GUC_MAX_INSTANCES_PER_CLASS     32

#define GUC_DOORBELL_INVALID            256

/*
 * Work queue item header definitions
 *
 * The work queue is a circular buffer used to submit complex (multi-LRC)
 * workloads to the GuC. A work queue item is an entry in the circular buffer.
 */
#define WQ_STATUS_ACTIVE                1
#define WQ_STATUS_SUSPENDED             2
#define WQ_STATUS_CMD_ERROR             3
#define WQ_STATUS_ENGINE_ID_NOT_USED    4
#define WQ_STATUS_SUSPENDED_FROM_RESET  5
#define WQ_TYPE_BATCH_BUF               0x1
#define WQ_TYPE_PSEUDO                  0x2
#define WQ_TYPE_INORDER                 0x3
#define WQ_TYPE_NOOP                    0x4
#define WQ_TYPE_MULTI_LRC               0x5
#define WQ_TYPE_MASK                    GENMASK(7, 0)
#define WQ_LEN_MASK                     GENMASK(26, 16)

#define WQ_GUC_ID_MASK                  GENMASK(15, 0)
#define WQ_RING_TAIL_MASK               GENMASK(28, 18)

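/*
 * Illustrative sketch only (hypothetical helper, not part of the GuC ABI or
 * the i915 driver): one way the masks above could be used to pack the first
 * dword of a multi-LRC work queue item. @len_dw, the payload length in
 * dwords, is an assumed parameter. FIELD_PREP() (<linux/bitfield.h>) is
 * assumed to be available via the includes above, as the SLPC_EVENT() macro
 * further down already relies on it.
 */
static inline u32 example_wq_item_header(u32 len_dw)
{
        return FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
               FIELD_PREP(WQ_LEN_MASK, len_dw);
}
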
#define GUC_STAGE_DESC_ATTR_ACTIVE      BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB  BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL      BIT(2)
#define GUC_STAGE_DESC_ATTR_PREEMPT     BIT(3)
#define GUC_STAGE_DESC_ATTR_RESET       BIT(4)
#define GUC_STAGE_DESC_ATTR_WQLOCKED    BIT(5)
#define GUC_STAGE_DESC_ATTR_PCH         BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED  BIT(7)

#define GUC_CTL_LOG_PARAMS              0
#define   GUC_LOG_VALID                 BIT(0)
#define   GUC_LOG_NOTIFY_ON_HALF_FULL   BIT(1)
#define   GUC_LOG_CAPTURE_ALLOC_UNITS   BIT(2)
#define   GUC_LOG_LOG_ALLOC_UNITS       BIT(3)
#define   GUC_LOG_CRASH_SHIFT           4
#define   GUC_LOG_CRASH_MASK            (0x3 << GUC_LOG_CRASH_SHIFT)
#define   GUC_LOG_DEBUG_SHIFT           6
#define   GUC_LOG_DEBUG_MASK            (0xF << GUC_LOG_DEBUG_SHIFT)
#define   GUC_LOG_CAPTURE_SHIFT         10
#define   GUC_LOG_CAPTURE_MASK          (0x3 << GUC_LOG_CAPTURE_SHIFT)
#define   GUC_LOG_BUF_ADDR_SHIFT        12

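/*
 * Illustrative sketch only (hypothetical helper, not the i915 code): how the
 * GUC_CTL_LOG_PARAMS control dword above might be assembled. The buffer unit
 * counts and the 4K-aligned buffer offset are assumed parameters; the driver
 * derives the real values from the allocated log buffer object.
 */
static inline u32 example_guc_ctl_log_params(u32 crash_units, u32 debug_units,
                                             u32 capture_units, u32 offset_4k)
{
        return GUC_LOG_VALID |
               GUC_LOG_NOTIFY_ON_HALF_FULL |
               ((crash_units << GUC_LOG_CRASH_SHIFT) & GUC_LOG_CRASH_MASK) |
               ((debug_units << GUC_LOG_DEBUG_SHIFT) & GUC_LOG_DEBUG_MASK) |
               ((capture_units << GUC_LOG_CAPTURE_SHIFT) & GUC_LOG_CAPTURE_MASK) |
               (offset_4k << GUC_LOG_BUF_ADDR_SHIFT);
}
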
#define GUC_CTL_WA                      1
#define   GUC_WA_GAM_CREDITS            BIT(10)
#define   GUC_WA_DUAL_QUEUE             BIT(11)
#define   GUC_WA_RCS_RESET_BEFORE_RC6   BIT(13)
#define   GUC_WA_CONTEXT_ISOLATION      BIT(15)
#define   GUC_WA_PRE_PARSER             BIT(14)
#define   GUC_WA_HOLD_CCS_SWITCHOUT     BIT(17)
#define   GUC_WA_POLLCS                 BIT(18)
#define   GUC_WA_RCS_REGS_IN_CCS_REGS_LIST      BIT(21)

#define GUC_CTL_FEATURE                 2
#define   GUC_CTL_ENABLE_SLPC           BIT(2)
#define   GUC_CTL_DISABLE_SCHEDULER     BIT(14)

#define GUC_CTL_DEBUG                   3
#define   GUC_LOG_VERBOSITY_SHIFT       0
#define   GUC_LOG_VERBOSITY_LOW         (0 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_MED         (1 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_HIGH        (2 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_ULTRA       (3 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define   GUC_LOG_VERBOSITY_MIN         0
#define   GUC_LOG_VERBOSITY_MAX         3
#define   GUC_LOG_VERBOSITY_MASK        0x0000000f
#define   GUC_LOG_DESTINATION_MASK      (3 << 4)
#define   GUC_LOG_DISABLED              (1 << 6)
#define   GUC_PROFILE_ENABLED           (1 << 7)

#define GUC_CTL_ADS                     4
#define   GUC_ADS_ADDR_SHIFT            1
#define   GUC_ADS_ADDR_MASK             (0xFFFFF << GUC_ADS_ADDR_SHIFT)

#define GUC_CTL_DEVID                   5

#define GUC_CTL_MAX_DWORDS              (SOFT_SCRATCH_COUNT - 2) /* [1..14] */

/* Generic GT SysInfo data types */
#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED            0
#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK   1
#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI 2
#define GUC_GENERIC_GT_SYSINFO_MAX                      16

/*
 * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6].
 * Bit 7 can be used for operations that apply to all engine classes and
 * instances.
 */
#define GUC_ENGINE_CLASS_SHIFT          0
#define GUC_ENGINE_CLASS_MASK           (0x7 << GUC_ENGINE_CLASS_SHIFT)
#define GUC_ENGINE_INSTANCE_SHIFT       3
#define GUC_ENGINE_INSTANCE_MASK        (0xf << GUC_ENGINE_INSTANCE_SHIFT)
#define GUC_ENGINE_ALL_INSTANCES        BIT(7)

#define MAKE_GUC_ID(class, instance) \
        (((class) << GUC_ENGINE_CLASS_SHIFT) | \
         ((instance) << GUC_ENGINE_INSTANCE_SHIFT))

#define GUC_ID_TO_ENGINE_CLASS(guc_id) \
        (((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
        (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)

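/*
 * Illustrative sketch only (hypothetical helper): composing a GuC engine ID
 * with MAKE_GUC_ID() and recovering the class and instance with the
 * GUC_ID_TO_* macros. The class must fit in 3 bits and the instance in
 * 4 bits for the round trip to hold.
 */
static inline bool example_guc_id_round_trip(u8 class, u8 instance)
{
        u32 guc_id = MAKE_GUC_ID(class, instance);

        return GUC_ID_TO_ENGINE_CLASS(guc_id) == class &&
               GUC_ID_TO_ENGINE_INSTANCE(guc_id) == instance;
}
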
#define SLPC_EVENT(id, c) (\
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \
)

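/*
 * Illustrative example only: SLPC_EVENT() packs the event id and argument
 * count into dword 1 of a HOST2GUC_PC_SLPC_REQUEST message. Assuming a
 * hypothetical event id "id" taking two arguments "arg0" and "arg1", such a
 * request could start as:
 *
 *	u32 request[] = {
 *		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
 *		SLPC_EVENT(id, 2),
 *		arg0,
 *		arg1,
 *	};
 */
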
/* the GuC arrays don't include OTHER_CLASS */
static u8 engine_class_guc_class_map[] = {
        [RENDER_CLASS]            = GUC_RENDER_CLASS,
        [COPY_ENGINE_CLASS]       = GUC_BLITTER_CLASS,
        [VIDEO_DECODE_CLASS]      = GUC_VIDEO_CLASS,
        [VIDEO_ENHANCEMENT_CLASS] = GUC_VIDEOENHANCE_CLASS,
        [OTHER_CLASS]             = GUC_GSC_OTHER_CLASS,
        [COMPUTE_CLASS]           = GUC_COMPUTE_CLASS,
};

static u8 guc_class_engine_class_map[] = {
        [GUC_RENDER_CLASS]       = RENDER_CLASS,
        [GUC_BLITTER_CLASS]      = COPY_ENGINE_CLASS,
        [GUC_VIDEO_CLASS]        = VIDEO_DECODE_CLASS,
        [GUC_VIDEOENHANCE_CLASS] = VIDEO_ENHANCEMENT_CLASS,
        [GUC_COMPUTE_CLASS]      = COMPUTE_CLASS,
        [GUC_GSC_OTHER_CLASS]    = OTHER_CLASS,
};

static inline u8 engine_class_to_guc_class(u8 class)
{
        BUILD_BUG_ON(ARRAY_SIZE(engine_class_guc_class_map) != MAX_ENGINE_CLASS + 1);
        GEM_BUG_ON(class > MAX_ENGINE_CLASS);

        return engine_class_guc_class_map[class];
}

static inline u8 guc_class_to_engine_class(u8 guc_class)
{
        BUILD_BUG_ON(ARRAY_SIZE(guc_class_engine_class_map) != GUC_LAST_ENGINE_CLASS + 1);
        GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);

        return guc_class_engine_class_map[guc_class];
}

/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
        u32 header;
        u32 context_desc;
        u32 submit_element_info;
        u32 fence_id;
} __packed;

struct guc_process_desc_v69 {
        u32 stage_id;
        u64 db_base_addr;
        u32 head;
        u32 tail;
        u32 error_offset;
        u64 wq_base_addr;
        u32 wq_size_bytes;
        u32 wq_status;
        u32 engine_presence;
        u32 priority;
        u32 reserved[36];
} __packed;

struct guc_sched_wq_desc {
        u32 head;
        u32 tail;
        u32 error_offset;
        u32 wq_status;
        u32 reserved[28];
} __packed;

/* Helper for context registration H2G */
struct guc_ctxt_registration_info {
        u32 flags;
        u32 context_idx;
        u32 engine_class;
        u32 engine_submit_mask;
        u32 wq_desc_lo;
        u32 wq_desc_hi;
        u32 wq_base_lo;
        u32 wq_base_hi;
        u32 wq_size;
        u32 hwlrca_lo;
        u32 hwlrca_hi;
};
#define CONTEXT_REGISTRATION_FLAG_KMD   BIT(0)

/* Preempt to idle on quantum expiry */
#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0)

/*
 * GuC Context registration descriptor.
 * FIXME: This is only required to exist during context registration.
 * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime of
 * the LRC is not required.
 */
struct guc_lrc_desc_v69 {
        u32 hw_context_desc;
        u32 slpm_perf_mode_hint;        /* SLPC v1 only */
        u32 slpm_freq_hint;
        u32 engine_submit_mask;         /* In logical space */
        u8 engine_class;
        u8 reserved0[3];
        u32 priority;
        u32 process_desc;
        u32 wq_addr;
        u32 wq_size;
        u32 context_flags;              /* CONTEXT_REGISTRATION_* */
        /* Time for one workload to execute. (in microseconds) */
        u32 execution_quantum;
        /* Time to wait for a preemption request to complete before issuing a
         * reset. (in microseconds).
         */
        u32 preemption_timeout;
        u32 policy_flags;               /* CONTEXT_POLICY_* */
        u32 reserved1[19];
} __packed;

/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
        u32 kl;
        u32 value;
} __packed;

/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
struct guc_update_context_policy_header {
        u32 action;
        u32 ctx_id;
} __packed;

struct guc_update_context_policy {
        struct guc_update_context_policy_header header;
        struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;

/* Format of the UPDATE_SCHEDULING_POLICIES H2G data packet */
struct guc_update_scheduling_policy_header {
        u32 action;
} __packed;

/*
 * Can't dynamically allocate memory for the scheduling policy KLV because
 * it will be sent from within the reset path. Need a fixed-size lump on
 * the stack instead :(.
 *
 * Currently, there is only one KLV defined, which has 1 word of KL + 2 words of V.
 */
#define MAX_SCHEDULING_POLICY_SIZE 3

struct guc_update_scheduling_policy {
        struct guc_update_scheduling_policy_header header;
        u32 data[MAX_SCHEDULING_POLICY_SIZE];
} __packed;

#define GUC_POWER_UNSPECIFIED   0
#define GUC_POWER_D0            1
#define GUC_POWER_D1            2
#define GUC_POWER_D2            3
#define GUC_POWER_D3            4

/* Scheduling policy settings */

#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION        100     /* in ms */
#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO           50      /* in percent */

#define GLOBAL_POLICY_MAX_NUM_WI 15

/* Don't reset an engine upon preemption failure */
#define GLOBAL_POLICY_DISABLE_ENGINE_RESET                              BIT(0)

#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000

/*
 * GuC converts the timeout to clock ticks internally. Different platforms have
 * different GuC clocks. Thus, the maximum value before overflow is platform
 * dependent. The current worst case is about 110s, so the spec says to limit
 * it to 100s to be safe.
 */
#define GUC_POLICY_MAX_EXEC_QUANTUM_US          (100 * 1000 * 1000UL)
#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US       (100 * 1000 * 1000UL)

static inline u32 guc_policy_max_exec_quantum_ms(void)
{
        BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
        return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
}

static inline u32 guc_policy_max_preempt_timeout_ms(void)
{
        BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
        return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
}

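/*
 * Illustrative sketch only (hypothetical helper): clamping a requested
 * execution quantum, in milliseconds, to the platform-safe limit above
 * before it is handed to the GuC.
 */
static inline u32 example_clamp_exec_quantum_ms(u32 requested_ms)
{
        u32 max_ms = guc_policy_max_exec_quantum_ms();

        return requested_ms > max_ms ? max_ms : requested_ms;
}
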
struct guc_policies {
        u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
        /* In microseconds. How much time to allow before DPC processing is
         * called back via interrupt (to prevent DPC queue drain starving).
         * Typically 1000s of microseconds (example only, not granularity). */
        u32 dpc_promote_time;

        /* Must be set to take these new values. */
        u32 is_valid;

        /* Max number of WIs to process per call. A large value may keep CS
         * idle. */
        u32 max_num_work_items;

        u32 global_flags;
        u32 reserved[4];
} __packed;

/* GuC MMIO reg state struct */
struct guc_mmio_reg {
        u32 offset;
        u32 value;
        u32 flags;
#define GUC_REGSET_MASKED               BIT(0)
#define GUC_REGSET_NEEDS_STEERING       BIT(1)
#define GUC_REGSET_MASKED_WITH_VALUE    BIT(2)
#define GUC_REGSET_RESTORE_ONLY         BIT(3)
#define GUC_REGSET_STEERING_GROUP       GENMASK(15, 12)
#define GUC_REGSET_STEERING_INSTANCE    GENMASK(23, 20)
        u32 mask;
} __packed;

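/*
 * Illustrative sketch only (hypothetical helper, not the i915 code): filling
 * one save/restore entry for a masked register using the flags above. The
 * offset and value are assumed parameters supplied by the caller.
 */
static inline void example_fill_masked_reg(struct guc_mmio_reg *reg,
                                           u32 offset, u32 value)
{
        reg->offset = offset;
        reg->value = value;
        reg->flags = GUC_REGSET_MASKED;
        reg->mask = 0;
}
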
/* GuC register sets */
struct guc_mmio_reg_set {
        u32 address;
        u16 count;
        u16 reserved;
} __packed;

/* HW info */
struct guc_gt_system_info {
        u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
        u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES];
        u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;

enum {
        GUC_CAPTURE_LIST_INDEX_PF = 0,
        GUC_CAPTURE_LIST_INDEX_VF = 1,
        GUC_CAPTURE_LIST_INDEX_MAX = 2,
};

/* Register types of GuC capture register lists */
enum guc_capture_type {
        GUC_CAPTURE_LIST_TYPE_GLOBAL = 0,
        GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
        GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
        GUC_CAPTURE_LIST_TYPE_MAX,
};

/* Class indices for capture_class and capture_instance arrays */
enum {
        GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
        GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
        GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
        GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
        GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
};

/* GuC Additional Data Struct */
struct guc_ads {
        struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
        u32 reserved0;
        u32 scheduler_policies;
        u32 gt_system_info;
        u32 reserved1;
        u32 control_data;
        u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
        u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
        u32 private_data;
        u32 reserved2;
        u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
        u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
        u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
        u32 reserved[14];
} __packed;

/* Engine usage stats */
struct guc_engine_usage_record {
        u32 current_context_index;
        u32 last_switch_in_stamp;
        u32 reserved0;
        u32 total_runtime;
        u32 reserved1[4];
} __packed;

struct guc_engine_usage {
        struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;

/* GuC logging structures */

enum guc_log_buffer_type {
        GUC_DEBUG_LOG_BUFFER,
        GUC_CRASH_DUMP_LOG_BUFFER,
        GUC_CAPTURE_LOG_BUFFER,
        GUC_MAX_LOG_BUFFER
};

/*
 * struct guc_log_buffer_state - GuC log buffer state
 *
 * The state structure below is used to coordinate retrieval of the GuC
 * firmware logs. Separate state is maintained for each log buffer type.
 * read_ptr points to the location up to which i915 has last read the log
 * buffer and is read-only for the GuC firmware. write_ptr is advanced by the
 * GuC by the number of bytes written for each log entry and is read-only for
 * i915.
 * When any type of log buffer becomes half full, the GuC sends a flush
 * interrupt. The GuC firmware expects that while it is writing to the second
 * half of the buffer, the first half is consumed by the host, which then
 * sends a flush-completed acknowledgment, so that the GuC never overwrites
 * unconsumed data and no logs are lost. So when the buffer becomes half full
 * and i915 has requested the interrupt, the GuC sets the flush_to_file field,
 * sets sampled_write_ptr to the value of write_ptr and raises the interrupt.
 * On receiving the interrupt, i915 should read the buffer, clear the
 * flush_to_file field and update read_ptr with the value of sampled_write_ptr
 * before sending an acknowledgment to the GuC. The marker and version fields
 * are for internal use by the GuC and are opaque to i915. The buffer_full_cnt
 * field is incremented every time the GuC detects a log buffer overflow.
 */
struct guc_log_buffer_state {
        u32 marker[2];
        u32 read_ptr;
        u32 write_ptr;
        u32 size;
        u32 sampled_write_ptr;
        u32 wrap_offset;
        union {
                struct {
                        u32 flush_to_file:1;
                        u32 buffer_full_cnt:4;
                        u32 reserved:27;
                };
                u32 flags;
        };
        u32 version;
} __packed;

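/*
 * Illustrative sketch only (hypothetical helper, not the i915
 * implementation): the host side of the flush handshake described above.
 * By this point the host is assumed to have copied out the log data in
 * [read_ptr, sampled_write_ptr); it then publishes the new read pointer and
 * clears flush_to_file before acknowledging the GuC.
 */
static inline void example_ack_log_flush(struct guc_log_buffer_state *state)
{
        state->read_ptr = state->sampled_write_ptr;
        state->flush_to_file = 0;
}
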
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum intel_guc_recv_message {
        INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
        INTEL_GUC_RECV_MSG_EXCEPTION = BIT(30),
};

#endif