/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

#define DRM_IVPU_GET_PARAM 0x00
#define DRM_IVPU_SET_PARAM 0x01
#define DRM_IVPU_BO_CREATE 0x02
#define DRM_IVPU_BO_INFO   0x03
#define DRM_IVPU_SUBMIT    0x05
#define DRM_IVPU_BO_WAIT   0x06

#define DRM_IOCTL_IVPU_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
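
/*
 * Illustrative usage sketch, not part of this UAPI. The DRM_IOCTL_IVPU_*
 * codes above are issued with ioctl() on a file descriptor opened on the
 * VPU DRM render node (the node path below is an example and may differ):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (fd >= 0 && ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("VPU PCI device ID: 0x%llx\n",
 *		       (unsigned long long)args.value);
 */
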
/**
 * DOC: contexts
 *
 * VPU contexts have private virtual address space, job queues and priority.
 * Each context is identified by a unique ID. A context is created on open().
 */

#define DRM_IVPU_PARAM_DEVICE_ID            0
#define DRM_IVPU_PARAM_DEVICE_REVISION      1
#define DRM_IVPU_PARAM_PLATFORM_TYPE        2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE      3
#define DRM_IVPU_PARAM_NUM_CONTEXTS         4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY     6
#define DRM_IVPU_PARAM_CONTEXT_ID           7
#define DRM_IVPU_PARAM_FW_API_VERSION       8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT     9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG          11
#define DRM_IVPU_PARAM_SKU                  12

#define DRM_IVPU_PLATFORM_TYPE_SILICON      0

#define DRM_IVPU_CONTEXT_PRIORITY_IDLE      0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS     2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or a device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
	 * Value of current context scheduling priority (read-write).
	 * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * Engine ID (e.g. DRM_IVPU_ENGINE_COMPUTE) is given via @index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
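
/*
 * Illustrative sketch, not part of this UAPI; `fd` is assumed to be an open
 * VPU render node descriptor as in the example above. Most params are
 * read-only and queried with DRM_IOCTL_IVPU_GET_PARAM, while
 * DRM_IVPU_PARAM_CONTEXT_PRIORITY is read-write and can be changed for the
 * current context with DRM_IOCTL_IVPU_SET_PARAM:
 *
 *	struct drm_ivpu_param args = {
 *		.param = DRM_IVPU_PARAM_CONTEXT_PRIORITY,
 *		.value = DRM_IVPU_CONTEXT_PRIORITY_FOCUS,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_SET_PARAM, &args) != 0)
 *		perror("DRM_IOCTL_IVPU_SET_PARAM");
 */
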
#define DRM_IVPU_BO_HIGH_MEM   0x00000001
#define DRM_IVPU_BO_MAPPABLE   0x00000002

#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC         0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_CACHE_MASK)

/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate VPU address from >4GB range.
	 * Buffer object with vpu address >4GB can always be accessed by the
	 * VPU DMA engine, but some HW generations may not be able to access
	 * this memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Allocated BO will not be cached on host side nor snooped on the VPU side.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
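
/*
 * Illustrative sketch, not part of this UAPI; `fd` and the 1 MiB size are
 * assumptions. Create a host-mappable, cached buffer object and receive its
 * GEM handle and VPU virtual address from the driver:
 *
 *	struct drm_ivpu_bo_create args = {
 *		.size = 1 << 20,
 *		.flags = DRM_IVPU_BO_MAPPABLE | DRM_IVPU_BO_CACHED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
 *		printf("handle %u at VPU address 0x%llx\n",
 *		       args.handle, (unsigned long long)args.vpu_addr);
 */
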
/**
 * struct drm_ivpu_bo_info - Query buffer object info
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
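
/*
 * Illustrative sketch, not part of this UAPI; `fd` and `handle` are assumed
 * to come from an earlier DRM_IOCTL_IVPU_BO_CREATE call. Query the BO and
 * use @mmap_offset to map it into the process address space:
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_ivpu_bo_info info = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info) == 0 && info.mmap_offset)
 *		ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, info.mmap_offset);
 */
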
/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using the %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to a u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of the array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;
};
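
/*
 * Illustrative sketch, not part of this UAPI; `fd`, `cmd_bo` and `data_bo`
 * are assumed GEM handles, with `cmd_bo` holding the command buffer. The
 * command buffer handle has to be first in the handle array:
 *
 *	__u32 handles[2] = { cmd_bo, data_bo };
 *
 *	struct drm_ivpu_submit args = {
 *		.buffers_ptr = (__u64)(uintptr_t)handles,
 *		.buffer_count = 2,
 *		.engine = DRM_IVPU_ENGINE_COMPUTE,
 *		.commands_offset = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args) != 0)
 *		perror("DRM_IOCTL_IVPU_SUBMIT");
 */
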
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0, returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS on success or a device specific error code otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
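
/*
 * Illustrative sketch, not part of this UAPI; `fd` and `cmd_bo` match the
 * submit example above, and CLOCK_MONOTONIC is assumed as the reference for
 * the absolute timeout. Wait roughly one second for the command buffer to
 * become inactive, then check the job status:
 *
 *	#include <time.h>
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 *	struct drm_ivpu_bo_wait args = {
 *		.handle = cmd_bo,
 *		.timeout_ns = (__s64)ts.tv_sec * 1000000000 + ts.tv_nsec + 1000000000,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &args) == 0 &&
 *	    args.job_status == DRM_IVPU_JOB_STATUS_SUCCESS)
 *		puts("job completed successfully");
 */
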
#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */