/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR			0

#define IDXD_PORTAL_SIZE		PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)
	u64 cache_control_mem:1;
	u64 cache_control_cache:1;

	u64 batch_continuation:1;

	u64 max_batch_shift:4;

#define IDXD_GENCAP_OFFSET		0x10
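/*
 * Usage sketch (illustrative, not part of this header): GENCAP is a
 * 64-bit read-only capability register whose shift-encoded fields are
 * powers of two; e.g. the largest supported batch holds
 * 2^max_batch_shift descriptors. Assuming a local copy of the register
 * held in the capability union this file declares (declaration elided
 * above), following the file's bits/bitfield convention:
 *
 *	gen_cap.bits = ioread64(reg_base + IDXD_GENCAP_OFFSET);
 *	max_batch_size = 1U << gen_cap.max_batch_shift;
 */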
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5

	u64 total_rdbufs:8;	/* formerly total_tokens */
	u64 rdbuf_ctrl:1;	/* formerly token_en */
	u64 rdbuf_limit:1;	/* formerly token_limit */
	u64 progress_limit:1;	/* descriptor and batch descriptor */

#define IDXD_GRPCAP_OFFSET		0x30

union engine_cap_reg {

#define IDXD_ENGCAP_OFFSET		0x38

#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40
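/*
 * Usage sketch (illustrative, not part of this header): OPCAP is a
 * 256-bit bitmap at offset 0x40, read as four consecutive 64-bit words;
 * bit N set means opcode N is supported. For example,
 * DSA_OPCODE_MEMMOVE (0x3) corresponds to IDXD_OPCAP_MEMMOVE (bit 3) in
 * the first word.
 *
 *	int i;
 *	u64 opcap[IDXD_MAX_OPCAP_BITS / 64];
 *
 *	for (i = 0; i < ARRAY_SIZE(opcap); i++)
 *		opcap[i] = ioread64(reg_base + IDXD_OPCAP_OFFSET +
 *				    i * sizeof(u64));
 *
 * If (opcap[0] & IDXD_OPCAP_MEMMOVE) is non-zero, memory move
 * descriptors may be submitted to this device.
 */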
#define IDXD_TABLE_OFFSET		0x60

#define IDXD_TABLE_MULT			0x100
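/*
 * Usage sketch (illustrative, not part of this header): the offsets
 * table register at 0x60 reports where the other register tables
 * (GRPCFG, WQCFG, perfmon, ...) live, expressed in multiples of
 * IDXD_TABLE_MULT (0x100) bytes from the start of BAR0. With the
 * offsets_reg union (declaration elided here), the driver derives its
 * cached offsets roughly as:
 *
 *	offsets.bits[0] = ioread64(reg_base + IDXD_TABLE_OFFSET);
 *	offsets.bits[1] = ioread64(reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
 *	grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
 *	wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
 */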
#define IDXD_GENCFG_OFFSET		0x80

#define IDXD_GENCTRL_OFFSET		0x88

	u32 softerr_int_en:1;

#define IDXD_GENSTATS_OFFSET		0x90

enum idxd_device_status_state {
	IDXD_DEVICE_STATE_DISABLED = 0,
	IDXD_DEVICE_STATE_ENABLED,
	IDXD_DEVICE_STATE_DRAIN,
	IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
	IDXD_DEVICE_RESET_SOFTWARE = 0,
	IDXD_DEVICE_RESET_FLR,
	IDXD_DEVICE_RESET_WARM,
	IDXD_DEVICE_RESET_COLD,
};

#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000
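/*
 * Usage sketch (illustrative, not part of this header): INTCAUSE reports
 * why the device raised the miscellaneous interrupt; each IDXD_INTC_*
 * bit is a separate cause. A handler reads the register, writes the same
 * value back to acknowledge the causes it observed, then services each
 * set bit. process_swerr() and complete_pending_command() below are
 * placeholders, not functions defined anywhere:
 *
 *	u32 cause = ioread32(reg_base + IDXD_INTCAUSE_OFFSET);
 *
 *	iowrite32(cause, reg_base + IDXD_INTCAUSE_OFFSET);
 *	if (cause & IDXD_INTC_ERR)
 *		process_swerr();
 *	if (cause & IDXD_INTC_CMD)
 *		complete_pending_command();
 */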
#define IDXD_CMD_OFFSET			0xa0

union idxd_command_reg {

	IDXD_CMD_ENABLE_DEVICE = 1,
	IDXD_CMD_DISABLE_DEVICE,
	IDXD_CMD_RESET_DEVICE,
	IDXD_CMD_DRAIN_PASID,
	IDXD_CMD_ABORT_PASID,
	IDXD_CMD_REQUEST_INT_HANDLE,
	IDXD_CMD_RELEASE_INT_HANDLE,

#define CMD_INT_HANDLE_IMS		0x10000

#define IDXD_CMDSTS_OFFSET		0xa8

#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8
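/*
 * Usage sketch (illustrative, not part of this header): after a command
 * is written to the IDXD_CMD register, software polls or waits for the
 * command-complete interrupt and then reads CMDSTS. Bit 31
 * (IDXD_CMDSTS_ACTIVE) means the command is still in flight, the low
 * byte carries an idxd_cmdsts_err code, and for commands that return a
 * value (e.g. a 16-bit interrupt handle) the result sits above
 * IDXD_CMDSTS_RES_SHIFT:
 *
 *	u32 sts = ioread32(reg_base + IDXD_CMDSTS_OFFSET);
 *
 *	if (!(sts & IDXD_CMDSTS_ACTIVE)) {
 *		u8 err = sts & IDXD_CMDSTS_ERR_MASK;
 *		u32 result = (sts >> IDXD_CMDSTS_RES_SHIFT) & 0xffff;
 *		...
 *	}
 */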
enum idxd_cmdsts_err {
	IDXD_CMDSTS_SUCCESS = 0,
	IDXD_CMDSTS_INVAL_CMD,
	IDXD_CMDSTS_INVAL_WQIDX,
	/* enable device errors */
	IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
	IDXD_CMDSTS_ERR_CONFIG,
	IDXD_CMDSTS_ERR_BUSMASTER_EN,
	IDXD_CMDSTS_ERR_PASID_INVAL,
	IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
	IDXD_CMDSTS_ERR_GRP_CONFIG,
	IDXD_CMDSTS_ERR_GRP_CONFIG2,
	IDXD_CMDSTS_ERR_GRP_CONFIG3,
	IDXD_CMDSTS_ERR_GRP_CONFIG4,
	/* enable wq errors */
	IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
	IDXD_CMDSTS_ERR_WQ_ENABLED,
	IDXD_CMDSTS_ERR_WQ_SIZE,
	IDXD_CMDSTS_ERR_WQ_PRIOR,
	IDXD_CMDSTS_ERR_WQ_MODE,
	IDXD_CMDSTS_ERR_BOF_EN,
	IDXD_CMDSTS_ERR_PASID_EN,
	IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
	IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
	/* disable device errors */
	IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
	/* disable WQ, drain WQ, abort WQ, reset WQ */
	IDXD_CMDSTS_ERR_DEV_NOT_EN,
	/* request interrupt handle */
	IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
	IDXD_CMDSTS_ERR_NO_HANDLE,
};

#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
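/*
 * Usage sketch (illustrative, not part of this header): SWERR is a
 * multi-word software error record. Bit 0 of the first word indicates
 * the record is valid, and bit 1 that at least one later error was
 * dropped while the record was outstanding. After copying the record
 * out, software writes IDXD_SWERR_ACK to the first word to clear both
 * bits and re-arm error reporting:
 *
 *	u64 swerr = ioread64(reg_base + IDXD_SWERR_OFFSET);
 *
 *	if (swerr & IDXD_SWERR_VALID) {
 *		(copy or log the full record here)
 *		iowrite64(IDXD_SWERR_ACK, reg_base + IDXD_SWERR_OFFSET);
 *	}
 */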
	u64 invalid_flags:32;

	u64 dec_aecs_format_ver:1;
	u64 drop_init_bits:1;

	u64 force_array_output_mod:1;
	u64 load_part_aecs:1;
	u64 comp_early_abort:1;

#define IDXD_IAACAP_OFFSET		0x180

#define IDXD_EVLCFG_OFFSET		0xe0

#define IDXD_EVL_SIZE_MIN		0x0040
#define IDXD_EVL_SIZE_MAX		0xffff

	u64 use_rdbuf_limit:1;
	u64 rdbufs_reserved:8;

	u64 rdbufs_allowed:8;

	u64 desc_progress_limit:2;

	u64 batch_progress_limit:2;

	union group_flags flags;

	u32 mode:1;		/* shared or dedicated */
	u32 bof:1;		/* block on fault */
	u32 wq_ats_disable:1;
	u32 wq_prs_disable:1;

	u32 max_xfer_shift:5;
	u32 max_batch_shift:4;

	u16 occupancy_table_sel:1;

	u16 occupancy_int_en:1;

#define WQCFG_PASID_IDX			2
#define WQCFG_PRIVL_IDX			2
#define WQCFG_OCCUP_IDX			6

#define WQCFG_OCCUP_MASK		0xffff
/*
 * This macro calculates the offset into the WQCFG register block
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32-bit dword for the config register
 *
 * The WQCFG register block is divided into one group of registers per
 * WQ. The n index selects the register group for that particular WQ.
 * Each register is 32 bits; ofs selects which 32-bit register within
 * the group to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({\
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
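/*
 * Usage sketch (illustrative, not part of this header): reading back one
 * work queue's WQCFG block a dword at a time. WQCFG_STRIDES() gives the
 * number of 32-bit words in one WQ's config; "wqcfg" is assumed to point
 * at a buffer of at least that many u32s and "idxd" at the driver's
 * struct idxd_device, which caches wqcfg_offset and wqcfg_size from the
 * offsets table and WQCAP registers.
 *
 *	int i;
 *
 *	for (i = 0; i < WQCFG_STRIDES(idxd); i++)
 *		wqcfg[i] = ioread32(idxd->reg_base +
 *				    WQCFG_OFFSET(idxd, wq_id, i));
 */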
#define GRPCFG_SIZE			64
#define GRPWQCFG_STRIDES		4

/*
 * This macro calculates the offset into the GRPCFG register block
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64-bit qword within the group's WQ field
 *
 * The GRPCFG register block is divided into one set of registers per
 * group. The n index selects the register set for that particular
 * group. Each register is 64 bits; ofs selects which 64-bit word of the
 * group's WQ configuration to access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
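/*
 * Usage sketch (illustrative, not part of this header): each group's
 * 64-byte GRPCFG block holds a WQ membership bitmap (GRPWQCFG_STRIDES
 * 64-bit words), followed by the engine bitmap at byte 32 and the group
 * flags at byte 40. "grpcfg" below stands for the driver's cached group
 * configuration (WQ words, engine bitmap, flags) for the group being
 * programmed; the exact structure is an assumption here. Writing a
 * group's configuration then looks roughly like:
 *
 *	int i;
 *
 *	for (i = 0; i < GRPWQCFG_STRIDES; i++)
 *		iowrite64(grpcfg->wqs[i],
 *			  idxd->reg_base + GRPWQCFG_OFFSET(idxd, grp_id, i));
 *	iowrite64(grpcfg->engines, idxd->reg_base + GRPENGCFG_OFFSET(idxd, grp_id));
 *	iowrite64(grpcfg->flags.bits, idxd->reg_base + GRPFLGCFG_OFFSET(idxd, grp_id));
 */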
/*
 * Performance monitor registers. The offsets below are relative to the
 * perfmon register block located via the offsets table register, not to
 * the start of BAR0.
 */
#define IDXD_PERFCAP_OFFSET		0x0

	u64 num_perf_counter:6;

	u64 num_event_category:4;
	u64 global_event_category:16;

	u64 cap_per_counter:1;
	u64 writeable_counter:1;
	u64 counter_freeze:1;
	u64 overflow_interrupt:1;

#define IDXD_EVNTCAP_OFFSET		0x80

	u32 event_category:4;

#define IDXD_CNTRCAP_OFFSET		0x800

struct idxd_cntrcap {

	struct idxd_event events[];

#define IDXD_PERFRST_OFFSET		0x10

	u32 perfrst_config:1;
	u32 perfrst_counter:1;

#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100

	u64 global_freeze_ovf:1;

	u64 event_category:4;

#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200

union idxd_cntrdata {

	u64 event_count_value;

#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {

#define IDXD_MAX_BATCH_IDENT		256

	u64 err_info_valid:1;

	/* Invalid Flags 0x11 */

	/* Invalid Int Handle 0x19 */
	/* Page fault 0x1a */
	/* Page fault 0x06, 0x1f, only operand_id */
	/* Page fault before drain or in batch, 0x26, 0x27 */

	u16 first_err_in_batch:1;

struct dsa_evl_entry {
	struct __evl_entry e;
	struct dsa_completion_record cr;

struct iax_evl_entry {
	struct __evl_entry e;

	struct iax_completion_record cr;