/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif
/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */
/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cache-line seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying that no more errors
 *	exist. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"
/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;

	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;

	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
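/*
 * Example (illustrative only): a minimal sketch of chaining a hypothetical
 * extension onto a uAPI struct that embeds an extension pointer. The
 * extension name MY_EXT_NAME and struct my_ext are assumptions for
 * illustration, not part of this header:
 *
 *	struct my_ext {
 *		struct i915_user_extension base;
 *		__u64 extra_data;
 *	} ext = {
 *		.base = { .next_extension = 0, .name = MY_EXT_NAME },
 *		.extra_data = 42,
 *	};
 *
 *	arg.extensions = (uintptr_t)&ext;	// head of the chain
 */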
/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};
/**
 * enum drm_i915_gem_engine_class - uapi engine type enumeration
 *
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. This enum provides a classification of the role
 * of the engine, which may be used when requesting operations to be performed
 * on a certain subset of engines, or for providing information about that
 * subset.
 */
enum drm_i915_gem_engine_class {
	/**
	 * @I915_ENGINE_CLASS_RENDER:
	 *
	 * Render engines support instructions used for 3D, Compute (GPGPU),
	 * and programmable media workloads. These instructions fetch data and
	 * dispatch individual work items to threads that operate in parallel.
	 * The threads run small programs (called "kernels" or "shaders") on
	 * the GPU's execution units (EUs).
	 */
	I915_ENGINE_CLASS_RENDER	= 0,

	/**
	 * @I915_ENGINE_CLASS_COPY:
	 *
	 * Copy engines (also referred to as "blitters") support instructions
	 * that move blocks of data from one location in memory to another,
	 * or that fill a specified location of memory with fixed data.
	 * Copy engines can perform pre-defined logical or bitwise operations
	 * on the source, destination, or pattern data.
	 */
	I915_ENGINE_CLASS_COPY		= 1,

	/**
	 * @I915_ENGINE_CLASS_VIDEO:
	 *
	 * Video engines (also referred to as "bit stream decode" (BSD) or
	 * "vdbox") support instructions that perform fixed-function media
	 * decode and encode.
	 */
	I915_ENGINE_CLASS_VIDEO		= 2,

	/**
	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
	 *
	 * Video enhancement engines (also referred to as "vebox") support
	 * instructions related to image enhancement.
	 */
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/**
	 * @I915_ENGINE_CLASS_COMPUTE:
	 *
	 * Compute engines support a subset of the instructions available
	 * on render engines: compute engines support Compute (GPGPU) and
	 * programmable media workloads, but do not support the 3D pipeline.
	 */
	I915_ENGINE_CLASS_COMPUTE	= 4,

	/* Values in this enum should be kept compact. */

	/**
	 * @I915_ENGINE_CLASS_INVALID:
	 *
	 * Placeholder value to represent an invalid engine class assignment.
	 */
	I915_ENGINE_CLASS_INVALID	= -1
};
/**
 * struct i915_engine_class_instance - Engine class/instance identifier
 *
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	/**
	 * @engine_class:
	 *
	 * Engine class from enum drm_i915_gem_engine_class
	 */
	__u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2

	/**
	 * @engine_instance:
	 *
	 * Engine instance.
	 */
	__u16 engine_instance;
};
/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))
#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
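/*
 * Example (illustrative only): these macros build the perf event config
 * value for the i915 PMU. A minimal sketch, assuming the perf_event_open(2)
 * syscall and an i915_pmu_type value read from the PMU's sysfs "type" node
 * (see the DOC above); both are assumptions, not part of this header:
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,	// read from sysfs, assumption
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 */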
/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY	0x1
#define I915_BOX_FLIP		0x2
#define I915_BOX_WAIT		0x4
#define I915_BOX_TEXTURE_LOAD	0x8
#define I915_BOX_LOST_CONTEXT	0x10
/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT			0x00
#define DRM_I915_FLUSH			0x01
#define DRM_I915_FLIP			0x02
#define DRM_I915_BATCHBUFFER		0x03
#define DRM_I915_IRQ_EMIT		0x04
#define DRM_I915_IRQ_WAIT		0x05
#define DRM_I915_GETPARAM		0x06
#define DRM_I915_SETPARAM		0x07
#define DRM_I915_ALLOC			0x08
#define DRM_I915_FREE			0x09
#define DRM_I915_INIT_HEAP		0x0a
#define DRM_I915_CMDBUFFER		0x0b
#define DRM_I915_DESTROY_HEAP		0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP		0x0f
#define DRM_I915_HWS_ADDR		0x11
#define DRM_I915_GEM_INIT		0x13
#define DRM_I915_GEM_EXECBUFFER		0x14
#define DRM_I915_GEM_PIN		0x15
#define DRM_I915_GEM_UNPIN		0x16
#define DRM_I915_GEM_BUSY		0x17
#define DRM_I915_GEM_THROTTLE		0x18
#define DRM_I915_GEM_ENTERVT		0x19
#define DRM_I915_GEM_LEAVEVT		0x1a
#define DRM_I915_GEM_CREATE		0x1b
#define DRM_I915_GEM_PREAD		0x1c
#define DRM_I915_GEM_PWRITE		0x1d
#define DRM_I915_GEM_MMAP		0x1e
#define DRM_I915_GEM_SET_DOMAIN		0x1f
#define DRM_I915_GEM_SW_FINISH		0x20
#define DRM_I915_GEM_SET_TILING		0x21
#define DRM_I915_GEM_GET_TILING		0x22
#define DRM_I915_GEM_GET_APERTURE	0x23
#define DRM_I915_GEM_MMAP_GTT		0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE		0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS		0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT		0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO(DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO(DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR(DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
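/*
 * Example (illustrative only): these request codes are used with ioctl(2)
 * on an open DRM device node, typically via libdrm's drmIoctl() wrapper,
 * which retries on EINTR. A minimal sketch; the device node path is an
 * assumption and varies per system:
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	struct drm_i915_gem_get_aperture ap = {};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap) == 0)
 *		; // ap now holds the aperture sizes
 */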
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;
/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2
/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT		 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40
/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask; nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
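/*
 * Example (illustrative only): a minimal sketch, assuming libdrm's
 * drmIoctl(), of reading the scheduler capability bitmask and testing one
 * of the bits above:
 *
 *	int caps = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HAS_SCHEDULER,
 *		.value = &caps,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 &&
 *	    (caps & I915_SCHEDULER_CAP_PRIORITY))
 *		; // user-defined priorities are supported
 */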
/*
 * Query the status of HuC load.
 *
 * The query can fail in the following scenarios with the listed error codes:
 *  -ENODEV if HuC is not present on this platform,
 *  -EOPNOTSUPP if HuC firmware usage is disabled,
 *  -ENOPKG if HuC firmware fetch failed,
 *  -ENOEXEC if HuC firmware is invalid or mismatched,
 *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
 *  -EIO if the FW transfer or the FW authentication failed.
 *
 * If the IOCTL is successful, the returned parameter will be set to one of
 * the following values:
 *  * 0 if HuC firmware load is not complete,
 *  * 1 if HuC firmware is authenticated and running.
 */
#define I915_PARAM_HUC_STATUS		 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47
/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly disadvised.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52
/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/*
 * Frequency of the timestamps in OA reports. This used to be the same as the CS
 * timestamp frequency, but differs on some platforms.
 */
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57

/* Must be kept compact -- no holes and well documented */
/**
 * struct drm_i915_getparam - Driver parameter query structure.
 */
struct drm_i915_getparam {
	/** @param: Driver parameter to query. */
	__s32 param;

	/**
	 * @value: Address of memory where queried value should be put.
	 *
	 * WARNING: Using pointers instead of fixed-size u64 means we need to
	 * write compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
};

/**
 * typedef drm_i915_getparam_t - Driver parameter query structure.
 * See struct drm_i915_getparam.
 */
typedef struct drm_i915_getparam drm_i915_getparam_t;
/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;
/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;
struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};
struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
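/*
 * Example (illustrative only): a minimal sketch, assuming libdrm's
 * drmIoctl() and an open device fd, that creates a 4 KiB object:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		; // create.handle names the new object; create.size is the
 *		  // page-aligned allocated size
 */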
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};
struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
/**
 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
 *
 * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
 * and is used to retrieve the fake offset to mmap an object specified by &handle.
 *
 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
 */
struct drm_i915_gem_mmap_offset {
	/** @handle: Handle for the object being mapped. */
	__u32 handle;
	/** @pad: Must be zero */
	__u32 pad;
	/**
	 * @offset: The fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * @flags: Flags for extended behaviour.
	 *
	 * It is mandatory that one of the `MMAP_OFFSET` types
	 * be included:
	 *
	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
	 *
	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
	 * type. On devices without local memory, this caching mode is invalid.
	 *
	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
	 * be used, depending on the object placement on creation. WB will be used
	 * when the object can only exist in system memory, WC otherwise.
	 */
	__u64 flags;

#define I915_MMAP_OFFSET_GTT	0
#define I915_MMAP_OFFSET_WC	1
#define I915_MMAP_OFFSET_WB	2
#define I915_MMAP_OFFSET_UC	3
#define I915_MMAP_OFFSET_FIXED	4

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; mbz.
	 */
	__u64 extensions;
};
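/*
 * Example (illustrative only): a minimal sketch, assuming libdrm's
 * drmIoctl(), of mapping an object with write-combined caching:
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, mmo.offset);
 */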
/**
 * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
 * preparation for accessing the pages via some CPU domain.
 *
 * Specifying a new write or read domain will flush the object out of the
 * previous domain (if required), before then updating the object's domain
 * tracking with the new domain.
 *
 * Note this might involve waiting for the object first if it is still active on
 * the GPU.
 *
 * Supported values for @read_domains and @write_domain:
 *
 *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
 *	- I915_GEM_DOMAIN_CPU: CPU cache domain
 *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
 *
 * All other domains are rejected.
 *
 * Note that for discrete, starting from DG1, this is no longer supported, and
 * is instead rejected. On such platforms the CPU domain is effectively static,
 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
 * which can't be set explicitly and instead depends on the object placements,
 * as per the below.
 *
 * Implicit caching rules, starting from DG1:
 *
 *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
 *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
 *	  mapped as write-combined only.
 *
 *	- Everything else is always allocated and mapped as write-back, with the
 *	  guarantee that everything is also coherent with the GPU.
 *
 * Note that this is likely to change in the future again, where we might need
 * more flexibility on future devices, so making this all explicit as part of a
 * new &drm_i915_gem_create_ext extension is probable.
 */
struct drm_i915_gem_set_domain {
	/** @handle: Handle for the object. */
	__u32 handle;

	/** @read_domains: New read domains. */
	__u32 read_domains;

	/**
	 * @write_domain: New write domain.
	 *
	 * Note that having something in the write domain implies it's in the
	 * read domain, and only that read domain.
	 */
	__u32 write_domain;
};
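/*
 * Example (illustrative only): a minimal sketch, assuming libdrm's
 * drmIoctl(), preparing an object for CPU writes on platforms where
 * set-domain is still supported:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */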
struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};
struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};
/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */
struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};
/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 *
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 *
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 *
	 * See struct drm_i915_gem_create_ext for the rules when dealing with
	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
	 * minimum page sizes, like DG2.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE		(1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};
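/*
 * Example (illustrative only): a minimal sketch of a softpinned exec object
 * that the batch will write to; gtt_offset is an address chosen by the
 * caller (an assumption for illustration):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = handle,
 *		.offset = gtt_offset,
 *		.flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE |
 *			 EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 *	};
 */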
/**
 * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
 * ioctl.
 *
 * The request will wait for input fence to signal before submission.
 *
 * The returned output fence will be signaled after the completion of the
 * request.
 */
struct drm_i915_gem_exec_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_EXEC_FENCE_WAIT:
	 * Wait for the input fence before request submission.
	 *
	 * I915_EXEC_FENCE_SIGNAL:
	 * Return request completion fence as output
	 */
	__u32 flags;
#define I915_EXEC_FENCE_WAIT            (1<<0)
#define I915_EXEC_FENCE_SIGNAL          (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
};
/**
 * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
 * for execbuf ioctl.
 *
 * This structure describes an array of drm_syncobj and associated points for
 * timeline variants of drm_syncobj. It is invalid to append this structure to
 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
 */
struct drm_i915_gem_execbuffer_ext_timeline_fences {
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/**
	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
	 * arrays.
	 */
	__u64 fence_count;

	/**
	 * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
	 * of length @fence_count.
	 */
	__u64 handles_ptr;

	/**
	 * @values_ptr: Pointer to an array of u64 values of length
	 * @fence_count.
	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 values_ptr;
};
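/*
 * Example (illustrative only): a minimal sketch chaining one timeline fence
 * onto an execbuf; syncobj_handle is assumed to name an existing timeline
 * drm_syncobj:
 *
 *	struct drm_i915_gem_exec_fence fence = {
 *		.handle = syncobj_handle,
 *		.flags = I915_EXEC_FENCE_SIGNAL,
 *	};
 *	__u64 point = 1;
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base = { .name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES },
 *		.fence_count = 1,
 *		.handles_ptr = (uintptr_t)&fence,
 *		.values_ptr = (uintptr_t)&point,
 *	};
 *
 *	execbuf.cliprects_ptr = (uintptr_t)&ext;
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 */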
/**
 * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
 * ioctl.
 */
struct drm_i915_gem_execbuffer2 {
	/** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @batch_start_offset: Offset in the batchbuffer to start execution
	 * from.
	 */
	__u32 batch_start_offset;

	/**
	 * @batch_len: Length in bytes of the batch buffer, starting from the
	 * @batch_start_offset. If 0, length is assumed to be the batch buffer
	 * object size.
	 */
	__u32 batch_len;

	/** @DR1: deprecated */
	__u32 DR1;

	/** @DR4: deprecated */
	__u32 DR4;

	/** @num_cliprects: See @cliprects_ptr */
	__u32 num_cliprects;

	/**
	 * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
	 *
	 * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
	 * I915_EXEC_USE_EXTENSIONS flags are not set.
	 *
	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
	 * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
	 * array.
	 *
	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
	 * single &i915_user_extension and num_cliprects is 0.
	 */
	__u64 cliprects_ptr;

	/** @flags: Execbuf flags */
	__u64 flags;
#define I915_EXEC_RING_MASK              (0x3f)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL	(0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE	(2<<6) /* gen4/5 only */
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 * the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be close()d after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST		(1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY   (1<<19)

/*
 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_SUBMIT		(1 << 20)

/*
 * Setting I915_EXEC_USE_EXTENSIONS implies that
 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
 * list of i915_user_extension. Each i915_user_extension node is the base of a
 * larger structure. The supported structures are listed in the
 * drm_i915_gem_execbuffer_ext enum.
 */
#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
#define __I915_EXEC_UNKNOWN_FLAGS	(-(I915_EXEC_USE_EXTENSIONS << 1))
	/** @rsvd1: Context id */
	__u64 rsvd1;

	/**
	 * @rsvd2: in and out sync_file file descriptors.
	 *
	 * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
	 * lower 32 bits of this field will have the in sync_file fd (input).
	 *
	 * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
	 * field will have the out sync_file fd (output).
	 */
	__u64 rsvd2;
};

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
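/*
 * Example (illustrative only): a minimal sketch submitting a single exec
 * object (see the earlier sketch) and collecting an out-fence; note the
 * _WR ioctl variant is required for I915_EXEC_FENCE_OUT to be written back:
 *
 *	struct drm_i915_gem_execbuffer2 eb = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_FENCE_OUT,
 *	};
 *	i915_execbuffer2_set_context_id(eb, ctx_id);
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb) == 0)
 *		out_fence_fd = eb.rsvd2 >> 32;	// close() after use
 */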
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};
struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide and only indirectly by reporting its class
	 * (there may be more than one engine in each class). There are race
	 * conditions which prevent the report of which engines are busy from
	 * being always accurate. However, the converse is not true. If the
	 * object is idle, the result of the ioctl, that all engines are idle,
	 * is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engine classes on which the object is being read, and the
	 * engine class on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
	 * 1 not 0 etc) for the last write is reported.
	 *
	 * The high word (bits 16:31) is a bitmask of which engine classes
	 * are currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine class is the same as specified in the
	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
	 * Some hardware may have parallel execution engines, e.g. multiple
	 * media engines, which are mapped to the same class identifier and so
	 * are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should only be used as a heuristic.
	 */
	__u32 busy;
};
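/*
 * Example (illustrative only): decoding the busy field per the rules above:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		__u16 write_class = busy.busy & 0xffff;	// class + 1, 0 = none
 *		__u16 read_mask = busy.busy >> 16;	// bitmask of classes
 *		int reading_on_render =
 *			read_mask & (1u << I915_ENGINE_CLASS_RENDER);
 *	}
 */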
1603 * struct drm_i915_gem_caching - Set or get the caching for given object
1606 * Allow userspace to control the GTT caching bits for a given object when the
1607 * object is later mapped through the ppGTT(or GGTT on older platforms lacking
1608 * ppGTT support, or if the object is used for scanout). Note that this might
1609 * require unbinding the object from the GTT first, if its current caching value
1612 * Note that this all changes on discrete platforms, starting from DG1, the
1613 * set/get caching is no longer supported, and is now rejected. Instead the CPU
1614 * caching attributes(WB vs WC) will become an immutable creation time property
1615 * for the object, along with the GTT caching level. For now we don't expose any
1616 * new uAPI for this, instead on DG1 this is all implicit, although this largely
1617 * shouldn't matter since DG1 is coherent by default(without any way of
1620 * Implicit caching rules, starting from DG1:
1622 * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1623 * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1624 * mapped as write-combined only.
1626 * - Everything else is always allocated and mapped as write-back, with the
1627 * guarantee that everything is also coherent with the GPU.
1629 * Note that this is likely to change in the future again, where we might need
1630 * more flexibility on future devices, so making this all explicit as part of a
1631 * new &drm_i915_gem_create_ext extension is probable.
1633 * Side note: Part of the reason for this is that changing the at-allocation-time CPU
1634 * caching attributes for the pages might be required (and is expensive) if we
1635 * need to then CPU map the pages later with different caching attributes. This
1636 * inconsistent caching behaviour, while supported on x86, is not universally
1637 * supported on other architectures. So for simplicity we opt for setting
1638 * everything at creation time, whilst also making it immutable, on discrete
1639 * platforms like DG1.
1641 struct drm_i915_gem_caching {
1643 * @handle: Handle of the buffer to set/get the caching level.
1648 * @caching: The GTT caching level to apply, or the value returned when queried.
1650 * The supported @caching values:
1652 * I915_CACHING_NONE:
1654 * GPU access is not coherent with CPU caches. Default for machines
1655 * without an LLC. This means manual flushing might be needed if we
1656 * want GPU access to be coherent.
1658 * I915_CACHING_CACHED:
1660 * GPU access is coherent with CPU caches and furthermore the data is
1661 * cached in last-level caches shared between CPU cores and the GPU GT.
1663 * I915_CACHING_DISPLAY:
1665 * Special GPU caching mode which is coherent with the scanout engines.
1666 * Transparently falls back to I915_CACHING_NONE on platforms where no
1667 * special cache mode (like write-through or gfdt flushing) is
1668 * available. The kernel automatically sets this mode when using a
1669 * buffer as a scanout target. Userspace can manually set this mode to
1670 * avoid a costly stall and clflush in the hotpath of drawing the first
1671 * frame.
1673 #define I915_CACHING_NONE 0
1674 #define I915_CACHING_CACHED 1
1675 #define I915_CACHING_DISPLAY 2
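/*
 * A hedged sketch of the set/get flow on older (non-discrete) platforms;
 * `fd` and `handle` as above, error handling omitted. On DG1 and newer
 * discrete parts these ioctls are rejected, as noted above:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 *	arg.caching = 0;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now holds the current caching level
 */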
1679 #define I915_TILING_NONE 0
1680 #define I915_TILING_X 1
1681 #define I915_TILING_Y 2
1683 * Do not add new tiling types here. The I915_TILING_* values are for
1684 * de-tiling fence registers that no longer exist on modern platforms. Although
1685 * the hardware may support new types of tiling in general (e.g., Tile4), we
1686 * do not need to add them to the uapi that is specific to now-defunct ioctls.
1688 #define I915_TILING_LAST I915_TILING_Y
1690 #define I915_BIT_6_SWIZZLE_NONE 0
1691 #define I915_BIT_6_SWIZZLE_9 1
1692 #define I915_BIT_6_SWIZZLE_9_10 2
1693 #define I915_BIT_6_SWIZZLE_9_11 3
1694 #define I915_BIT_6_SWIZZLE_9_10_11 4
1695 /* Not seen by userland */
1696 #define I915_BIT_6_SWIZZLE_UNKNOWN 5
1697 /* Seen by userland. */
1698 #define I915_BIT_6_SWIZZLE_9_17 6
1699 #define I915_BIT_6_SWIZZLE_9_10_17 7
1701 struct drm_i915_gem_set_tiling {
1702 /** Handle of the buffer to have its tiling state updated */
1706 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1707 * I915_TILING_Y).
1709 * This value is to be set on request, and will be updated by the
1710 * kernel on successful return with the actual chosen tiling layout.
1712 * The tiling mode may be demoted to I915_TILING_NONE when the system
1713 * has bit 6 swizzling that can't be managed correctly by GEM.
1715 * Buffer contents become undefined when changing tiling_mode.
1720 * Stride in bytes for the object when in I915_TILING_X or
1721 * I915_TILING_Y.
1726 * Returned address bit 6 swizzling required for CPU access through
1727 * mmap mapping.
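/*
 * An illustrative sketch of requesting X-tiling on legacy platforms; the
 * 4096-byte stride is a placeholder, and the kernel may demote the request
 * and updates the struct on return (error handling omitted):
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
 *	// set.tiling_mode may have been demoted to I915_TILING_NONE, and
 *	// set.swizzle_mode reports the bit 6 swizzling for CPU access
 */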
1732 struct drm_i915_gem_get_tiling {
1733 /** Handle of the buffer to get tiling state for. */
1737 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1738 * I915_TILING_Y).
1743 * Returned address bit 6 swizzling required for CPU access through
1744 * mmap mapping.
1749 * Returned address bit 6 swizzling required for CPU access through
1750 * mmap mapping whilst bound.
1752 __u32 phys_swizzle_mode;
1755 struct drm_i915_gem_get_aperture {
1756 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1760 * Available space in the aperture used by i915_gem_execbuffer, in
1763 __u64 aper_available_size;
1766 struct drm_i915_get_pipe_from_crtc_id {
1767 /** ID of CRTC being requested **/
1770 /** pipe of requested CRTC **/
1774 #define I915_MADV_WILLNEED 0
1775 #define I915_MADV_DONTNEED 1
1776 #define __I915_MADV_PURGED 2 /* internal state */
1778 struct drm_i915_gem_madvise {
1779 /** Handle of the buffer to change the backing store advice */
1782 /* Advice: either the buffer will be needed again in the near future,
1783 * or won't be and could be discarded under memory pressure.
1787 /** Whether the backing store still exists. */
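/*
 * A sketch of marking a buffer purgeable and later reclaiming it; if the
 * backing store was discarded in between, @retained reads back as 0 and
 * the contents must be recreated (error handling omitted):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained) {
 *		// backing store was purged under memory pressure
 *	}
 */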
1792 #define I915_OVERLAY_TYPE_MASK 0xff
1793 #define I915_OVERLAY_YUV_PLANAR 0x01
1794 #define I915_OVERLAY_YUV_PACKED 0x02
1795 #define I915_OVERLAY_RGB 0x03
1797 #define I915_OVERLAY_DEPTH_MASK 0xff00
1798 #define I915_OVERLAY_RGB24 0x1000
1799 #define I915_OVERLAY_RGB16 0x2000
1800 #define I915_OVERLAY_RGB15 0x3000
1801 #define I915_OVERLAY_YUV422 0x0100
1802 #define I915_OVERLAY_YUV411 0x0200
1803 #define I915_OVERLAY_YUV420 0x0300
1804 #define I915_OVERLAY_YUV410 0x0400
1806 #define I915_OVERLAY_SWAP_MASK 0xff0000
1807 #define I915_OVERLAY_NO_SWAP 0x000000
1808 #define I915_OVERLAY_UV_SWAP 0x010000
1809 #define I915_OVERLAY_Y_SWAP 0x020000
1810 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1812 #define I915_OVERLAY_FLAGS_MASK 0xff000000
1813 #define I915_OVERLAY_ENABLE 0x01000000
1815 struct drm_intel_overlay_put_image {
1816 /* various flags and src format description */
1818 /* source picture description */
1820 /* stride values and offsets are in bytes, buffer relative */
1821 __u16 stride_Y; /* stride for packed formats */
1823 __u32 offset_Y; /* offset for packed formats */
1829 /* to compensate the scaling factors for partially covered surfaces */
1830 __u16 src_scan_width;
1831 __u16 src_scan_height;
1832 /* output crtc description */
1841 #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1842 #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1843 #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1844 struct drm_intel_overlay_attrs {
1859 * Intel sprite handling
1861 * Color keying works with a min/mask/max tuple. Both source and destination
1862 * color keying is allowed.
1865 * Sprite pixels within the min & max values, masked against the color channels
1866 * specified in the mask field, will be transparent. All other pixels will
1867 * be displayed on top of the primary plane. For RGB surfaces, only the min
1868 * and mask fields will be used; ranged compares are not allowed.
1870 * Destination keying:
1871 * Primary plane pixels that match the min value, masked against the color
1872 * channels specified in the mask field, will be replaced by corresponding
1873 * pixels from the sprite plane.
1875 * Note that source & destination keying are exclusive; only one can be
1876 * active on a given plane.
1879 #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1880 * flags==0 to disable colorkeying.
1882 #define I915_SET_COLORKEY_DESTINATION (1<<1)
1883 #define I915_SET_COLORKEY_SOURCE (1<<2)
1884 struct drm_intel_sprite_colorkey {
1892 struct drm_i915_gem_wait {
1893 /** Handle of BO we shall wait on */
1896 /** Number of nanoseconds to wait. Returns time remaining. */
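/*
 * A sketch of a bounded wait on a BO; a negative @timeout_ns is treated as
 * an unbounded wait, and on success the remaining time is written back:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000, // 1s budget
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == -1 && errno == ETIME) {
 *		// object was still busy when the budget expired
 *	}
 */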
1900 struct drm_i915_gem_context_create {
1901 __u32 ctx_id; /* output: id of new context */
1906 * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
1908 struct drm_i915_gem_context_create_ext {
1909 /** @ctx_id: Id of the created context (output) */
1913 * @flags: Supported flags are:
1915 * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
1917 * Extensions may be appended to this structure and the driver must check
1918 * for those. See @extensions.
1920 * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
1922 * The created context will have a single timeline.
1925 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1926 #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
1927 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1928 (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1931 * @extensions: Zero-terminated chain of extensions.
1933 * I915_CONTEXT_CREATE_EXT_SETPARAM:
1934 * Context parameter to set or query during context creation.
1935 * See struct drm_i915_gem_context_create_ext_setparam.
1937 * I915_CONTEXT_CREATE_EXT_CLONE:
1938 * This extension has been removed. On the off chance someone somewhere
1939 * has attempted to use it, never re-use this extension number.
1942 #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1943 #define I915_CONTEXT_CREATE_EXT_CLONE 1
1947 * struct drm_i915_gem_context_param - Context parameter to set or query.
1949 struct drm_i915_gem_context_param {
1950 /** @ctx_id: Context id */
1953 /** @size: Size of the parameter @value */
1956 /** @param: Parameter to set or query */
1958 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1959 /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
1960 * someone somewhere has attempted to use it, never re-use this context
1963 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1964 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1965 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1966 #define I915_CONTEXT_PARAM_BANNABLE 0x5
1967 #define I915_CONTEXT_PARAM_PRIORITY 0x6
1968 #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1969 #define I915_CONTEXT_DEFAULT_PRIORITY 0
1970 #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
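/*
 * A sketch of adjusting a context's priority; values above the default
 * typically require elevated privileges, and out-of-range values are
 * rejected:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512, // within I915_CONTEXT_MIN/MAX_USER_PRIORITY
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */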
1972 * When using the following param, value should be a pointer to
1973 * drm_i915_gem_context_param_sseu.
1975 #define I915_CONTEXT_PARAM_SSEU 0x7
1978 * Not all clients may want to attempt automatic recovery of a context after
1979 * a hang (for example, some clients may only submit very small incremental
1980 * batches relying on known logical state of previous batches which will never
1981 * recover correctly and each attempt will hang), and so would prefer that
1982 * the context is forever banned instead.
1984 * If set to false (0), after a reset, subsequent (and in flight) rendering
1985 * from this context is discarded, and the client will need to create a new
1986 * context to use instead.
1988 * If set to true (1), the kernel will automatically attempt to recover the
1989 * context by skipping the hanging batch and executing the next batch starting
1990 * from the default context state (discarding the incomplete logical context
1991 * state lost due to the reset).
1993 * On creation, all new contexts are marked as recoverable.
1995 #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1998 * The id of the associated virtual memory address space (ppGTT) of
1999 * this context. Can be retrieved and passed to another context
2000 * (on the same fd) for both to use the same ppGTT and so share
2001 * address layouts, and avoid reloading the page tables on context
2002 * switches between themselves.
2004 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
2006 #define I915_CONTEXT_PARAM_VM 0x9
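/*
 * A sketch of sharing one ppGTT between two contexts on the same fd:
 * query the VM id from the first context, then assign it to the second:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_a,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); // p.value = VM id
 *
 *	p.ctx_id = ctx_b;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p); // ctx_b shares the VM
 */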
2009 * I915_CONTEXT_PARAM_ENGINES:
2011 * Bind this context to operate on this subset of available engines. Henceforth,
2012 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
2013 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
2014 * and upwards. Slots 0...N are filled in using the specified (class, instance).
2015 * Use
2016 * engine_class: I915_ENGINE_CLASS_INVALID,
2017 * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
2018 * to specify a gap in the array that can be filled in later, e.g. by a
2019 * virtual engine used for load balancing.
2021 * Setting the number of engines bound to the context to 0, by passing a zero
2022 * sized argument, will revert to default settings.
2024 * See struct i915_context_param_engines.
2026 * Extensions:
2027 * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
2028 * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
2029 * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
2031 #define I915_CONTEXT_PARAM_ENGINES 0xa
2034 * I915_CONTEXT_PARAM_PERSISTENCE:
2036 * Allow the context and active rendering to survive the process until
2037 * completion. Persistence allows fire-and-forget clients to queue up a
2038 * bunch of work, hand the output over to a display server and then quit.
2039 * If the context is marked as not persistent, upon closing (either via
2040 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
2041 * or process termination), the context and any outstanding requests will be
2042 * cancelled (and exported fences for cancelled requests marked as -EIO).
2044 * By default, new contexts allow persistence.
2046 #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
2048 /* This API has been removed. On the off chance someone somewhere has
2049 * attempted to use it, never re-use this context param number.
2051 #define I915_CONTEXT_PARAM_RINGSIZE 0xc
2054 * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2056 * Mark that the context makes use of protected content, which will result
2057 * in the context being invalidated when the protected content session is.
2058 * Given that the protected content session is killed on suspend, the device
2059 * is kept awake for the lifetime of a protected context, so the user should
2060 * make sure to dispose of them once done.
2061 * This flag can only be set at context creation time and, when set to true,
2062 * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
2063 * to false. This flag can't be set to true in conjunction with setting the
2064 * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
2068 * struct drm_i915_gem_context_create_ext_setparam p_protected = {
2070 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2073 * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
2077 * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
2079 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2080 * .next_extension = to_user_pointer(&p_protected),
2083 * .param = I915_CONTEXT_PARAM_RECOVERABLE,
2087 * struct drm_i915_gem_context_create_ext create = {
2088 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2089 * .extensions = to_user_pointer(&p_norecover),
2092 * ctx_id = gem_context_create_ext(drm_fd, &create);
2094 * In addition to the normal failure cases, setting this flag during context
2095 * creation can result in the following errors:
2097 * -ENODEV: feature not available
2098 * -EPERM: trying to mark a recoverable or not bannable context as protected
2100 #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
2101 /* Must be kept compact -- no holes and well documented */
2103 /** @value: Context parameter value to be set or queried */
2108 * Context SSEU programming
2110 * It may be necessary for either functional or performance reasons to configure
2111 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
2112 * Subslice/EU).
2114 * This is done by applying an SSEU configuration, using the below
2115 * struct drm_i915_gem_context_param_sseu, for every supported engine which
2116 * userspace intends to use.
2118 * Not all GPUs or engines support this functionality, in which case an error
2119 * code -ENODEV will be returned.
2121 * Also, the flexibility of possible SSEU configuration permutations varies between
2122 * GPU generations and software imposed limitations. Requesting an unsupported
2123 * combination will return an error code of -EINVAL.
2125 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
2126 * favour of a single global setting.
2128 struct drm_i915_gem_context_param_sseu {
2130 * Engine class & instance to be configured or queried.
2132 struct i915_engine_class_instance engine;
2135 * Unknown flags must be cleared to zero.
2138 #define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
2141 * Mask of slices to enable for the context. Valid values are a subset
2142 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
2147 * Mask of subslices to enable for the context. Valid values are a
2148 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
2150 __u64 subslice_mask;
2153 * Minimum/Maximum number of EUs to enable per subslice for the
2154 * context. min_eus_per_subslice must be less than or equal to
2155 * max_eus_per_subslice.
2157 __u16 min_eus_per_subslice;
2158 __u16 max_eus_per_subslice;
2161 * Unused for now. Must be cleared to zero.
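/*
 * A sketch of querying and then restricting the render engine's SSEU
 * configuration for a context; the mask manipulation is purely
 * illustrative and may be rejected with -EINVAL/-ENODEV as noted above:
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p); // read current masks
 *	sseu.subslice_mask &= sseu.subslice_mask >> 1;      // drop some subslices
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */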
2167 * DOC: Virtual Engine uAPI
2169 * Virtual engine is a concept where userspace is able to configure a set of
2170 * physical engines, submit a batch buffer, and let the driver execute it on any
2171 * engine from the set as it sees fit.
2173 * This is primarily useful on parts which have multiple instances of the same
2174 * engine class, like for example GT3+ Skylake parts with their two VCS engines.
2176 * For instance userspace can enumerate all engines of a certain class using the
2177 * previously described `Engine Discovery uAPI`_. After that userspace can
2178 * create a GEM context with a placeholder slot for the virtual engine (using
2179 * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
2180 * and instance respectively) and finally using the
2181 * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
2182 * the same reserved slot.
2184 * Example of creating a virtual engine and submitting a batch buffer to it:
2188 * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
2189 * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
2190 * .engine_index = 0, // Place this virtual engine into engine map slot 0
2191 * .num_siblings = 2,
2192 * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
2193 * { I915_ENGINE_CLASS_VIDEO, 1 }, },
2195 * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
2196 * .engines = { { I915_ENGINE_CLASS_INVALID,
2197 * I915_ENGINE_CLASS_INVALID_NONE } },
2198 * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
2200 * struct drm_i915_gem_context_create_ext_setparam p_engines = {
2202 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2205 * .param = I915_CONTEXT_PARAM_ENGINES,
2206 * .value = to_user_pointer(&engines),
2207 * .size = sizeof(engines),
2210 * struct drm_i915_gem_context_create_ext create = {
2211 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2212 * .extensions = to_user_pointer(&p_engines),
2215 * ctx_id = gem_context_create_ext(drm_fd, &create);
2217 * // Now we have created a GEM context with its engine map containing a
2218 * // single virtual engine. Submissions to this slot can go either to
2219 * // vcs0 or vcs1, depending on the load balancing algorithm used inside
2220 * // the driver. The load balancing is dynamic from one batch buffer to
2221 * // another and transparent to userspace.
2224 * execbuf.rsvd1 = ctx_id;
2225 * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
2226 * gem_execbuf(drm_fd, &execbuf);
2230 * i915_context_engines_load_balance:
2232 * Enable load balancing across this set of engines.
2234 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
2235 * used will proxy the execbuffer request onto one of the set of engines
2236 * in such a way as to distribute the load evenly across the set.
2238 * The set of engines must be compatible (e.g. the same HW class) as they
2239 * will share the same logical GPU context and ring.
2241 * To intermix rendering with the virtual engine and direct rendering onto
2242 * the backing engines (bypassing the load balancing proxy), the context must
2243 * be defined to use a single timeline for all engines.
2245 struct i915_context_engines_load_balance {
2246 struct i915_user_extension base;
2250 __u32 flags; /* all undefined flags must be zero */
2252 __u64 mbz64; /* reserved for future use; must be zero */
2254 struct i915_engine_class_instance engines[];
2255 } __attribute__((packed));
2257 #define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
2258 struct i915_user_extension base; \
2259 __u16 engine_index; \
2260 __u16 num_siblings; \
2263 struct i915_engine_class_instance engines[N__]; \
2264 } __attribute__((packed)) name__
2267 * i915_context_engines_bond:
2269 * Constructed bonded pairs for execution within a virtual engine.
2271 * All engines are equal, but some are more equal than others. Given
2272 * the distribution of resources in the HW, it may be preferable to run
2273 * a request on a given subset of engines in parallel to a request on a
2274 * specific engine. We enable this selection of engines within a virtual
2275 * engine by specifying bonding pairs, for any given master engine we will
2276 * only execute on one of the corresponding siblings within the virtual engine.
2278 * To execute a request in parallel on the master engine and a sibling requires
2279 * coordination with an I915_EXEC_FENCE_SUBMIT.
2281 struct i915_context_engines_bond {
2282 struct i915_user_extension base;
2284 struct i915_engine_class_instance master;
2286 __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2289 __u64 flags; /* all undefined flags must be zero */
2290 __u64 mbz64[4]; /* reserved for future use; must be zero */
2292 struct i915_engine_class_instance engines[];
2293 } __attribute__((packed));
2295 #define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2296 struct i915_user_extension base; \
2297 struct i915_engine_class_instance master; \
2298 __u16 virtual_index; \
2302 struct i915_engine_class_instance engines[N__]; \
2303 } __attribute__((packed)) name__
2306 * struct i915_context_engines_parallel_submit - Configure engine for
2307 * parallel submission.
2309 * Set up a slot in the context engine map to allow multiple BBs to be submitted
2310 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
2311 * in parallel. Multiple hardware contexts are created internally in the i915 to
2312 * run these BBs. Once a slot is configured for N BBs only N BBs can be
2313 * submitted in each execbuf IOCTL, and this is implicit behavior, e.g. the user
2314 * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
2315 * many BBs there are based on the slot's configuration. The N BBs are the last
2316 * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
2318 * The default placement behavior is to create implicit bonds between each
2319 * context if each context maps to more than 1 physical engine (e.g. context is
2320 * a virtual engine). Also we only allow contexts of the same engine class, and
2321 * these contexts must be in logically contiguous order. Examples of the placement
2322 * behavior are described below. Lastly, the default is to not allow BBs to be
2323 * preempted mid-batch. Rather, coordinated preemption points are inserted on all
2324 * hardware contexts between each set of BBs. Flags could be added in the future
2325 * to change both of these default behaviors.
2327 * Returns -EINVAL if hardware context placement configuration is invalid or if
2328 * the placement configuration isn't supported on the platform / submission
2329 * interface.
2330 * Returns -ENODEV if extension isn't supported on the platform / submission
2331 * interface.
2333 * .. code-block:: none
2336 * CS[X] = generic engine of same class, logical instance X
2337 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
2339 * Example 1 pseudo code:
2340 * set_engines(INVALID)
2341 * set_parallel(engine_index=0, width=2, num_siblings=1,
2342 * engines=CS[0],CS[1])
2344 * Results in the following valid placement:
2345 * CS[0], CS[1]
2347 * Example 2 pseudo code:
2348 * set_engines(INVALID)
2349 * set_parallel(engine_index=0, width=2, num_siblings=2,
2350 * engines=CS[0],CS[2],CS[1],CS[3])
2352 * Results in the following valid placements:
2353 * CS[0], CS[1]
2354 * CS[2], CS[3]
2356 * This can be thought of as two virtual engines, each containing two
2357 * engines thereby making a 2D array. However, there are bonds tying the
2358 * entries together and placing restrictions on how they can be scheduled.
2359 * Specifically, the scheduler can choose only vertical columns from the 2D
2360 * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
2361 * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
2362 * versa. Same for CS[2] requires also using CS[3].
2363 * VE[0] = CS[0], CS[2]
2364 * VE[1] = CS[1], CS[3]
2366 * Example 3 pseudo code:
2367 * set_engines(INVALID)
2368 * set_parallel(engine_index=0, width=2, num_siblings=2,
2369 * engines=CS[0],CS[1],CS[1],CS[3])
2371 * Results in the following valid and invalid placements:
2372 * CS[0], CS[1]
2373 * CS[1], CS[3] - Not logically contiguous, return -EINVAL
2375 struct i915_context_engines_parallel_submit {
2377 * @base: base user extension.
2379 struct i915_user_extension base;
2382 * @engine_index: slot for parallel engine
2387 * @width: number of contexts per parallel engine or in other words the
2388 * number of batches in each submission
2393 * @num_siblings: number of siblings per context or in other words the
2394 * number of possible placements for each submission
2399 * @mbz16: reserved for future use; must be zero
2404 * @flags: all undefined flags must be zero, currently not defined flags
2409 * @mbz64: reserved for future use; must be zero
2414 * @engines: 2-d array of engine instances to configure parallel engine
2416 * length = width (i) * num_siblings (j)
2417 * index = j + i * num_siblings
2419 struct i915_engine_class_instance engines[];
2423 #define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
2424 struct i915_user_extension base; \
2425 __u16 engine_index; \
2427 __u16 num_siblings; \
2431 struct i915_engine_class_instance engines[N__]; \
2432 } __attribute__((packed)) name__
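/*
 * A sketch mirroring Example 1 above: slot 0 configured for two batch
 * buffers per execbuf, each with a single placement (the VCS instances are
 * placeholders). The extension chains into struct i915_context_param_engines
 * exactly like the load balancing example earlier:
 *
 *	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
 *		.engine_index = 0,
 *		.width = 2,
 *		.num_siblings = 1,
 *		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
 *			     { I915_ENGINE_CLASS_VIDEO, 1 } },
 *	};
 */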
2435 * DOC: Context Engine Map uAPI
2437 * Context engine map is a new way of addressing engines when submitting batch-
2438 * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
2439 * inside the flags field of `struct drm_i915_gem_execbuffer2`.
2441 * To use it, created GEM contexts need to be configured with a list of engines
2442 * the user is intending to submit to. This is accomplished using the
2443 * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
2444 * i915_context_param_engines`.
2446 * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
2447 * configured map of engines.
2449 * Example of creating such a context and submitting against it:
2453 * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
2454 * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
2455 * { I915_ENGINE_CLASS_COPY, 0 } }
2457 * struct drm_i915_gem_context_create_ext_setparam p_engines = {
2459 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2462 * .param = I915_CONTEXT_PARAM_ENGINES,
2463 * .value = to_user_pointer(&engines),
2464 * .size = sizeof(engines),
2467 * struct drm_i915_gem_context_create_ext create = {
2468 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2469 * .extensions = to_user_pointer(&p_engines),
2472 * ctx_id = gem_context_create_ext(drm_fd, &create);
2474 * // We have now created a GEM context with two engines in the map:
2475 * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
2476 * // will not be accessible from this context.
2479 * execbuf.rsvd1 = ctx_id;
2480 * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
2481 * gem_execbuf(drm_fd, &execbuf);
2484 * execbuf.rsvd1 = ctx_id;
2485 * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
2486 * gem_execbuf(drm_fd, &execbuf);
2489 struct i915_context_param_engines {
2490 __u64 extensions; /* linked chain of extension blocks, 0 terminates */
2491 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2492 #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2493 #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
2494 struct i915_engine_class_instance engines[];
2495 } __attribute__((packed));
2497 #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2499 struct i915_engine_class_instance engines[N__]; \
2500 } __attribute__((packed)) name__
2503 * struct drm_i915_gem_context_create_ext_setparam - Context parameter
2504 * to set or query during context creation.
2506 struct drm_i915_gem_context_create_ext_setparam {
2507 /** @base: Extension link. See struct i915_user_extension. */
2508 struct i915_user_extension base;
2511 * @param: Context parameter to set or query.
2512 * See struct drm_i915_gem_context_param.
2514 struct drm_i915_gem_context_param param;
2517 struct drm_i915_gem_context_destroy {
2523 * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
2525 * DRM_I915_GEM_VM_CREATE -
2527 * Create a new virtual memory address space (ppGTT) for use within a context
2528 * on the same file. Extensions can be provided to configure exactly how the
2529 * address space is setup upon creation.
2531 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2532 * returned in the outparam @id.
2534 * An extension chain may be provided, starting with @extensions, and terminated
2535 * by the @next_extension being 0. Currently, no extensions are defined.
2537 * DRM_I915_GEM_VM_DESTROY -
2539 * Destroys a previously created VM id, specified in @vm_id.
2541 * No extensions or flags are allowed currently, and so must be zero.
2543 struct drm_i915_gem_vm_control {
2544 /** @extensions: Zero-terminated chain of extensions. */
2547 /** @flags: reserved for future usage, currently MBZ */
2550 /** @vm_id: Id of the VM created or to be destroyed */
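/*
 * A sketch of the create/destroy pairing (error handling omitted):
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);  // vm.vm_id is now valid
 *	// ... use vm.vm_id with I915_CONTEXT_PARAM_VM ...
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 */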
2554 struct drm_i915_reg_read {
2557 * For 64bit wide registers where the upper 32bits don't immediately
2558 * follow the lower 32bits, the offset of the lower 32bits must
2559 * be specified.
2562 #define I915_REG_READ_8B_WA (1ul << 0)
2564 __u64 val; /* Return value */
2569 * Render engine timestamp - 0x2358 + 64bit - gen7+
2570 * - Note this register returns an invalid value if using the default
2571 * single instruction 8-byte read; to work around that, pass the
2572 * flag I915_REG_READ_8B_WA in the offset field.
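/*
 * A sketch of reading the render engine timestamp register mentioned
 * above, with the 8B workaround flag OR'ed into the offset:
 *
 *	struct drm_i915_reg_read reg = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg); // reg.val holds the timestamp
 */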
2576 struct drm_i915_reset_stats {
2580 /* All resets since boot/module reload, for all contexts */
2583 /* Number of batches lost when active in GPU, for this context */
2586 /* Number of batches lost pending for execution, for this context */
2587 __u32 batch_pending;
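/*
 * A sketch of sampling the reset statistics for a context; note the
 * global reset count may be reported as 0 without sufficient privilege:
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	// stats.batch_active / stats.batch_pending describe this context's losses
 */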
2593 * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
2595 * Userptr objects have several restrictions on what ioctls can be used with the
2596 * object handle.
2598 struct drm_i915_gem_userptr {
2600 * @user_ptr: The pointer to the allocated memory.
2602 * Needs to be aligned to PAGE_SIZE.
2609 * The size in bytes for the allocated memory. This will also become the
2610 * object size.
2612 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE,
2613 * or larger.
2622 * I915_USERPTR_READ_ONLY:
2624 * Mark the object as readonly, this also means GPU access can only be
2625 * readonly. This is only supported on HW which supports readonly access
2626 * through the GTT. If the HW can't support readonly access, an error is
2629 * I915_USERPTR_PROBE:
2631 * Probe the provided @user_ptr range and validate that the @user_ptr is
2632 * indeed pointing to normal memory and that the range is also valid.
2633 * For example if some garbage address is given to the kernel, then this
2634 * should just fail.
2636 * Returns -EFAULT if the probe failed.
2638 * Note that this doesn't populate the backing pages, and also doesn't
2639 * guarantee that the object will remain valid when the object is
2640 * eventually used.
2642 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
2643 * returns a non-zero value.
2645 * I915_USERPTR_UNSYNCHRONIZED:
2647 * NOT USED. Setting this flag will result in an error.
2650 #define I915_USERPTR_READ_ONLY 0x1
2651 #define I915_USERPTR_PROBE 0x2
2652 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2654 * @handle: Returned handle for the object.
2656 * Object handles are nonzero.
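/*
 * A sketch of wrapping an anonymous, page-aligned allocation in a GEM
 * object, probing the range up front (error handling omitted):
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = I915_USERPTR_PROBE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg); // arg.handle on success
 */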
2661 enum drm_i915_oa_format {
2662 I915_OA_FORMAT_A13 = 1, /* HSW only */
2663 I915_OA_FORMAT_A29, /* HSW only */
2664 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
2665 I915_OA_FORMAT_B4_C8, /* HSW only */
2666 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
2667 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
2668 I915_OA_FORMAT_C4_B8, /* HSW+ */
2672 I915_OA_FORMAT_A12_B8_C8,
2673 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2676 I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
2677 I915_OA_FORMAT_A24u40_A14u32_B8_C8,
2680 I915_OAM_FORMAT_MPEC8u64_B8_C8,
2681 I915_OAM_FORMAT_MPEC8u32_B8_C8,
2683 I915_OA_FORMAT_MAX /* non-ABI */
2686 enum drm_i915_perf_property_id {
2688 * Open the stream for a specific context handle (as used with
2689 * execbuffer2). A stream opened for a specific context this way
2690 * won't typically require root privileges.
2692 * This property is available in perf revision 1.
2694 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2697 * A value of 1 requests the inclusion of raw OA unit reports as
2698 * part of stream samples.
2700 * This property is available in perf revision 1.
2702 DRM_I915_PERF_PROP_SAMPLE_OA,
2705 * The value specifies which set of OA unit metrics should be
2706 * configured, defining the contents of any OA unit reports.
2708 * This property is available in perf revision 1.
2710 DRM_I915_PERF_PROP_OA_METRICS_SET,
2713 * The value specifies the size and layout of OA unit reports.
2715 * This property is available in perf revision 1.
2717 DRM_I915_PERF_PROP_OA_FORMAT,
2720 * Specifying this property implicitly requests periodic OA unit
2721 * sampling and (at least on Haswell) the sampling frequency is derived
2722 * from this exponent as follows:
2724 * 80ns * 2^(period_exponent + 1)
2726 * This property is available in perf revision 1.
2728 DRM_I915_PERF_PROP_OA_EXPONENT,
2731 * Specifying this property is only valid when specifying a context to
2732 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
2733 * will hold preemption of the particular context we want to gather
2734 * performance data about. The execbuf2 submissions must include a
2735 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
2737 * This property is available in perf revision 3.
2739 DRM_I915_PERF_PROP_HOLD_PREEMPTION,
2742 * Specifying this pins all contexts to the specified SSEU power
2743 * configuration for the duration of the recording.
2745 * This parameter's value is a pointer to a struct
2746 * drm_i915_gem_context_param_sseu.
2748 * This property is available in perf revision 4.
2750 DRM_I915_PERF_PROP_GLOBAL_SSEU,
2753 * This optional parameter specifies the timer interval in nanoseconds
2754 * at which the i915 driver will check the OA buffer for available data.
2755 * Minimum allowed value is 100 microseconds. A default value is used by
2756 * the driver if this parameter is not specified. Note that larger timer
2757 * values will reduce cpu consumption during OA perf captures. However,
2758 * excessively large values would potentially result in OA buffer
2759 * overwrites as captures reach the end of the OA buffer.
2761 * This property is available in perf revision 5.
2763 DRM_I915_PERF_PROP_POLL_OA_PERIOD,
2766 * Multiple engines may be mapped to the same OA unit. The OA unit is
2767 * identified by class:instance of any engine mapped to it.
2769 * This parameter specifies the engine class and must be passed along
2770 * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
2772 * This property is available in perf revision 6.
2774 DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
2777 * This parameter specifies the engine instance and must be passed along
2778 * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
2780 * This property is available in perf revision 6.
2782 DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
2784 DRM_I915_PERF_PROP_MAX /* non-ABI */
2787 struct drm_i915_perf_open_param {
2789 #define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
2790 #define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
2791 #define I915_PERF_FLAG_DISABLED (1<<2)
2793 /** The number of u64 (id, value) pairs */
2794 __u32 num_properties;
2797 * Pointer to array of u64 (id, value) pairs configuring the stream
2800 __u64 properties_ptr;
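/*
 * A sketch of opening an OA stream; the properties array is flat (id,
 * value) pairs, and metrics_set_id is a placeholder obtained elsewhere
 * (e.g. from sysfs or DRM_I915_QUERY_PERF_CONFIG):
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */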
2804 * Enable data capture for a stream that was either opened in a disabled state
2805 * via I915_PERF_FLAG_DISABLED or was later disabled via
2806 * I915_PERF_IOCTL_DISABLE.
2808 * It is intended to be cheaper to disable and enable a stream than it may be
2809 * to close and re-open a stream with the same configuration.
2811 * It's undefined whether any pending data for the stream will be lost.
2813 * This ioctl is available in perf revision 1.
2815 #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
2818 * Disable data capture for a stream.
2820 * It is an error to try and read a stream that is disabled.
2822 * This ioctl is available in perf revision 1.
2824 #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
2827 * Change metrics_set captured by a stream.
2829 * If the stream is bound to a specific context, the configuration change
2830 * will be performed inline with that context such that it takes effect before
2831 * the next execbuf submission.
2833 * Returns the previously bound metrics set id, or a negative error code.
2835 * This ioctl is available in perf revision 2.
2837 #define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
2840 * Common to all i915 perf records
2842 struct drm_i915_perf_record_header {
2848 enum drm_i915_perf_record_type {
2851 * Samples are the workhorse record type whose contents are extensible
2852 * and defined when opening an i915 perf stream based on the given
2855 * Boolean properties following the naming convention
2856 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2859 * The order of these sample properties given by userspace has no
2860 * effect on the ordering of data within a sample. The order is
2861 * documented here:
2864 * struct drm_i915_perf_record_header header;
2866 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2869 DRM_I915_PERF_RECORD_SAMPLE = 1,
2872 * Indicates that one or more OA reports were not written by the
2873 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2874 * command collides with periodic sampling - which would be more likely
2875 * at higher sampling frequencies.
2877 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2880 * An error occurred that resulted in all pending OA reports being lost.
2882 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2884 DRM_I915_PERF_RECORD_MAX /* non-ABI */
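/*
 * A sketch of walking records returned by read(2) on a perf stream fd;
 * each record leads with a struct drm_i915_perf_record_header, and
 * header->size includes the header itself:
 *
 *	char buf[8192];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		struct drm_i915_perf_record_header *h = (void *)(buf + off);
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE) {
 *			// sample payload follows the header
 *		}
 *		off += h->size;
 *	}
 */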
2888 * struct drm_i915_perf_oa_config
2890 * Structure to upload perf dynamic configuration into the kernel.
2892 struct drm_i915_perf_oa_config {
2896 * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
2903 * Number of mux regs in &mux_regs_ptr.
2910 * Number of boolean regs in &boolean_regs_ptr.
2912 __u32 n_boolean_regs;
2917 * Number of flex regs in &flex_regs_ptr.
2924 * Pointer to tuples of u32 values (register address, value) for mux
2925 * registers. Expected length of buffer is (2 * sizeof(u32) *
2926 * &n_mux_regs).
2931 * @boolean_regs_ptr:
2933 * Pointer to tuples of u32 values (register address, value) for boolean
2934 * registers. Expected length of buffer is (2 * sizeof(u32) *
2935 * &n_boolean_regs).
2937 __u64 boolean_regs_ptr;
2942 * Pointer to tuples of u32 values (register address, value) for flex
2943 * registers. Expected length of buffer is (2 * sizeof(u32) *
2944 * &n_flex_regs).
2946 __u64 flex_regs_ptr;
2950 * struct drm_i915_query_item - An individual query for the kernel to process.
2952 * The behaviour is determined by the @query_id. Note that exactly what
2953 * @data_ptr points to also depends on the specific @query_id.
2955 struct drm_i915_query_item {
2959 * The id for this query. Currently accepted query IDs are:
2960 * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
2961 * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
2962 * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
2963 * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
2964 * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
2965 * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
2968 #define DRM_I915_QUERY_TOPOLOGY_INFO 1
2969 #define DRM_I915_QUERY_ENGINE_INFO 2
2970 #define DRM_I915_QUERY_PERF_CONFIG 3
2971 #define DRM_I915_QUERY_MEMORY_REGIONS 4
2972 #define DRM_I915_QUERY_HWCONFIG_BLOB 5
2973 #define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
2974 /* Must be kept compact -- no holes and well documented */
2979 * When set to zero by userspace, this is filled with the size of the
2980 * data to be written at the @data_ptr pointer. The kernel sets this
2981 * value to a negative value to signal an error on a particular query
2982 * item.
2989 * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
2991 * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
2994 * - %DRM_I915_QUERY_PERF_CONFIG_LIST
2995 * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
2996 * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
2998 * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
2999 * a struct i915_engine_class_instance that references a render engine.
3002 #define DRM_I915_QUERY_PERF_CONFIG_LIST 1
3003 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
3004 #define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
3009 * Data will be written at the location pointed by @data_ptr when the
3010 * value of @length matches the length of the data to be written by the
3011 * kernel.
3017 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
3018 * kernel to fill out.
3020 * Note that this is generally a two step process for each struct
3021 * drm_i915_query_item in the array:
3023 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
3024 * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
3025 * kernel will then fill in the size, in bytes, which tells userspace how
3026 * much memory it needs to allocate for the blob (say, for an array of properties).
3028 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
3029 * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
3030 * the &drm_i915_query_item.length should still be the same as what the
3031 * kernel previously set. At this point the kernel can fill in the blob.
3033 * Note that for some query items it can make sense for userspace to just pass
3034 * in a buffer/blob equal to or larger than the required size. In this case only
3035 * a single ioctl call is needed. For some smaller query items this can work
3036 * quite well.
3039 struct drm_i915_query {
3040 /** @num_items: The number of elements in the @items_ptr array */
3044 * @flags: Unused for now. Must be cleared to zero.
3051 * Pointer to an array of struct drm_i915_query_item. The number of
3052 * array elements is @num_items.
3058 * struct drm_i915_query_topology_info
3060 * Describes slice/subslice/EU information queried by
3061 * %DRM_I915_QUERY_TOPOLOGY_INFO
3063 struct drm_i915_query_topology_info {
3067 * Unused for now. Must be cleared to zero.
3074 * The number of bits used to express the slice mask.
3081 * The number of bits used to express the subslice mask.
3083 __u16 max_subslices;
3086 * @max_eus_per_subslice:
3088 * The number of bits in the EU mask that correspond to a single
3091 __u16 max_eus_per_subslice;
3096 * Offset in data[] at which the subslice masks are stored.
3098 __u16 subslice_offset;
3103 * Stride at which each of the subslice masks for each slice are
3106 __u16 subslice_stride;
3111 * Offset in data[] at which the EU masks are stored.
3118 * Stride at which each of the EU masks for each subslice are stored.
3125 * Contains 3 pieces of information:
3127 * - The slice mask with one bit per slice telling whether a slice is
3128 * available. The availability of slice X can be queried with the
3129 * following formula:
3133 * (data[X / 8] >> (X % 8)) & 1
3135 * Starting with Xe_HP platforms, Intel hardware no longer has
3136 * traditional slices so i915 will always report a single slice
3137 * (hardcoded slicemask = 0x1) which contains all of the platform's
3138 * subslices. I.e., the mask here does not reflect any of the newer
3139 * hardware concepts such as "gslices" or "cslices" since userspace
3140 * is capable of inferring those from the subslice mask.
3142 * - The subslice mask for each slice with one bit per subslice telling
3143 * whether a subslice is available. Starting with Gen12 we use the
3144 * term "subslice" to refer to what the hardware documentation
3145 * describes as a "dual-subslice." The availability of subslice Y
3146 * in slice X can be queried with the following formula:
3150 * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
3152 * - The EU mask for each subslice in each slice, with one bit per EU
3153 * telling whether an EU is available. The availability of EU Z in
3154 * subslice Y in slice X can be queried with the following formula:
3158 * (data[eu_offset +
3159 * (X * max_subslices + Y) * eu_stride +
3160 * Z / 8] >> (Z % 8)) & 1
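/*
 * A sketch of the three lookups above as helpers, assuming `info` points
 * to a struct drm_i915_query_topology_info returned by the query:
 *
 *	static bool has_slice(const struct drm_i915_query_topology_info *info, int X)
 *	{
 *		return (info->data[X / 8] >> (X % 8)) & 1;
 *	}
 *
 *	static bool has_subslice(const struct drm_i915_query_topology_info *info,
 *				 int X, int Y)
 *	{
 *		int off = info->subslice_offset + X * info->subslice_stride + Y / 8;
 *		return (info->data[off] >> (Y % 8)) & 1;
 *	}
 *
 *	static bool has_eu(const struct drm_i915_query_topology_info *info,
 *			   int X, int Y, int Z)
 *	{
 *		int off = info->eu_offset +
 *			  (X * info->max_subslices + Y) * info->eu_stride + Z / 8;
 *		return (info->data[off] >> (Z % 8)) & 1;
 *	}
 */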
3167 * DOC: Engine Discovery uAPI
3169 * Engine discovery uAPI is a way of enumerating physical engines present in a
3170 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
3171 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
3172 * `I915_PARAM_HAS_BLT`.
3174 * The need for this interface arose starting with Icelake and newer GPUs, which
3175 * started to establish a pattern of having multiple engines of a same class,
3176 * where not all instances were always completely functionally equivalent.
3178 * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
3179 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
3181 * Example for getting the list of engines:
3185 * struct drm_i915_query_engine_info *info;
3186 * struct drm_i915_query_item item = {
3187 * .query_id = DRM_I915_QUERY_ENGINE_INFO,
3189 * struct drm_i915_query query = {
3191 * .items_ptr = (uintptr_t)&item,
3195 * // First query the size of the blob we need, this needs to be large
3196 * // enough to hold our array of engines. The kernel will fill out the
3197 * // item.length for us, which is the number of bytes we need.
3199 * // Alternatively a large buffer can be allocated straight away enabling
3200 * // querying in one pass, in which case item.length should contain the
3201 * // length of the provided buffer.
3202 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3205 * info = calloc(1, item.length);
3206 * // Now that we allocated the required number of bytes, we call the ioctl
3207 * // again, this time with the data_ptr pointing to our newly allocated
3208 * // blob, which the kernel can then populate with info on all engines.
3209 * item.data_ptr = (uintptr_t)info;
3211 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3214 * // We can now access each engine in the array
3215 * for (i = 0; i < info->num_engines; i++) {
3216 * struct drm_i915_engine_info einfo = info->engines[i];
3217 * u16 class = einfo.engine.engine_class;
3218 * u16 instance = einfo.engine.engine_instance;
3224 * Each of the enumerated engines, apart from being defined by its class and
3225 * instance (see `struct i915_engine_class_instance`), also can have flags and
3226 * capabilities defined as documented in i915_drm.h.
3228 * For instance video engines which support HEVC encoding will have the
3229 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
3231 * Engine discovery only fully comes into its own when combined with the new way
3232 * of addressing engines when submitting batch buffers using contexts with
3233 * engine maps configured.
3237 * struct drm_i915_engine_info
3239 * Describes one engine and its capabilities as known to the driver.
3241 struct drm_i915_engine_info {
3242 /** @engine: Engine class and instance. */
3243 struct i915_engine_class_instance engine;
3245 /** @rsvd0: Reserved field. */
3248 /** @flags: Engine flags. */
3250 #define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
3252 /** @capabilities: Capabilities of this engine. */
3254 #define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
3255 #define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
3257 /** @logical_instance: Logical instance of engine */
3258 __u16 logical_instance;
3260 /** @rsvd1: Reserved fields. */
3262 /** @rsvd2: Reserved fields. */
3267 * struct drm_i915_query_engine_info
3269 * Engine info query enumerates all engines known to the driver by filling in
3270 * an array of struct drm_i915_engine_info structures.
3272 struct drm_i915_query_engine_info {
3273 /** @num_engines: Number of struct drm_i915_engine_info structs following. */
3279 /** @engines: Marker for drm_i915_engine_info structures. */
3280 struct drm_i915_engine_info engines[];
3284 * struct drm_i915_query_perf_config
3286 * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
3287 * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
3289 struct drm_i915_query_perf_config {
3294 * When &drm_i915_query_item.flags ==
3295 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
3296 * the number of configurations available.
3303 * When &drm_i915_query_item.flags ==
3304 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
3305 * value in this field as configuration identifier to decide
3306 * what data to write into config_ptr.
3313 * When &drm_i915_query_item.flags ==
3314 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
3315 * value in this field as configuration identifier to decide
3316 * what data to write into config_ptr.
3318 * String formatted like "%08x-%04x-%04x-%04x-%012x"
3326 * Unused for now. Must be cleared to zero.
3333 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
3334 * i915 will write an array of __u64 of configuration identifiers.
3336 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
3337 * i915 will write a struct drm_i915_perf_oa_config. If the following
3338 * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
3339 * write into the associated pointers the values submitted when the
3340 * configuration was created:
3342 * - &drm_i915_perf_oa_config.n_mux_regs
3343 * - &drm_i915_perf_oa_config.n_boolean_regs
3344 * - &drm_i915_perf_oa_config.n_flex_regs
3350 * enum drm_i915_gem_memory_class - Supported memory classes
3352 enum drm_i915_gem_memory_class {
3353 /** @I915_MEMORY_CLASS_SYSTEM: System memory */
3354 I915_MEMORY_CLASS_SYSTEM = 0,
3355 /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
3356 I915_MEMORY_CLASS_DEVICE,
3360 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
3362 struct drm_i915_gem_memory_class_instance {
3363 /** @memory_class: See enum drm_i915_gem_memory_class */
3366 /** @memory_instance: Which instance */
3367 __u16 memory_instance;
3371 * struct drm_i915_memory_region_info - Describes one region as known to the
3374 * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
3375 * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
3376 * at &drm_i915_query_item.query_id.
3378 struct drm_i915_memory_region_info {
3379 /** @region: The class:instance pair encoding */
3380 struct drm_i915_gem_memory_class_instance region;
3386 * @probed_size: Memory probed by the driver
3388 * Note that it should not be possible to ever encounter a zero value
3389 * here. Also note that no current region type will ever return -1 here,
3390 * although for future region types this might be a possibility. The
3391 * same applies to the other size fields.
3396 * @unallocated_size: Estimate of memory remaining
3398 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
3399 * Without this (or if this is an older kernel) the value here will
3400 * always equal the @probed_size. Note this is only currently tracked
3401 * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
3402 * will always equal the @probed_size).
3404 __u64 unallocated_size;
3411 * @probed_cpu_visible_size: Memory probed by the driver
3412 * that is CPU accessible.
3414 * This will always be <= @probed_size, and the
3415 * remainder (if there is any) will not be CPU
3416 * accessible.
3418 * On systems without small BAR, the @probed_size will
3419 * always equal the @probed_cpu_visible_size, since all
3420 * of it will be CPU accessible.
3422 * Note this is only tracked for
3423 * I915_MEMORY_CLASS_DEVICE regions (for other types the
3424 * value here will always equal the @probed_size).
3426 * Note that if the value returned here is zero, then
3427 * this must be an old kernel which lacks the relevant
3428 * small-bar uAPI support (including
3429 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
3430 * such systems we should never actually end up with a
3431 * small BAR configuration, assuming we are able to load
3432 * the kernel module. Hence it should be safe to treat
3433 * this the same as when @probed_cpu_visible_size ==
3434 * @probed_size.
3436 __u64 probed_cpu_visible_size;
3439 * @unallocated_cpu_visible_size: Estimate of CPU
3440 * visible memory remaining.
3442 * Note this is only tracked for
3443 * I915_MEMORY_CLASS_DEVICE regions (for other types the
3444 * value here will always equal the
3445 * @probed_cpu_visible_size).
3447 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
3448 * accounting. Without this the value here will always
3449 * equal the @probed_cpu_visible_size.
3454 * If this is an older kernel the value here will be
3455 * zero, see also @probed_cpu_visible_size.
3457 __u64 unallocated_cpu_visible_size;
3463 * struct drm_i915_query_memory_regions
3465 * The region info query enumerates all regions known to the driver by filling
3466 * in an array of struct drm_i915_memory_region_info structures.
3468 * Example for getting the list of supported regions:
3472 * struct drm_i915_query_memory_regions *info;
3473 * struct drm_i915_query_item item = {
3474 * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
3476 * struct drm_i915_query query = {
3478 * .items_ptr = (uintptr_t)&item,
3482 * // First query the size of the blob we need, this needs to be large
3483 * // enough to hold our array of regions. The kernel will fill out the
3484 * // item.length for us, which is the number of bytes we need.
3485 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3488 * info = calloc(1, item.length);
3489 * // Now that we allocated the required number of bytes, we call the ioctl
3490 * // again, this time with the data_ptr pointing to our newly allocated
3491 * // blob, which the kernel can then populate with the all the region info.
3492 * item.data_ptr = (uintptr_t)info;
3494 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3497 * // We can now access each region in the array
3498 * for (i = 0; i < info->num_regions; i++) {
3499 * struct drm_i915_memory_region_info mr = info->regions[i];
3500 * u16 class = mr.region.memory_class;
3501 * u16 instance = mr.region.memory_instance;
3508 struct drm_i915_query_memory_regions {
3509 /** @num_regions: Number of supported regions */
3515 /** @regions: Info about each supported region */
3516 struct drm_i915_memory_region_info regions[];
3520 * DOC: GuC HWCONFIG blob uAPI
3522 * The GuC produces a blob with information about the current device.
3523 * i915 reads this blob from GuC and makes it available via this uAPI.
3525 * The format and meaning of the blob content are documented in the
3526 * Programmer's Reference Manual.
3530 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
3531 * extension support using struct i915_user_extension.
3533 * Note that new buffer flags should be added here, at least for the stuff that
3534 * is immutable. Previously we would have two ioctls, one to create the object
3535 * with gem_create, and another to apply various parameters. However, this
3536 * creates some ambiguity for the params which are considered immutable. Also in
3537 * general we're phasing out the various SET/GET ioctls.
3539 struct drm_i915_gem_create_ext {
3541 * @size: Requested size for the object.
3543 * The (page-aligned) allocated size for the object will be returned.
3545 * On platforms like DG2/ATS the kernel will always use 64K or larger
3546 * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
3547 * minimum of 64K GTT alignment for such objects.
3549 * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
3550 * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
3551 * where we had the following complications:
3553 * 1) The entire PDE (which covers a 2MB virtual address range), must
3554 * contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
3555 * PDE is forbidden by the hardware.
3557 * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
3558 * objects.
3560 * However on actual production HW this was completely changed to now
3561 * allow setting a TLB hint at the PTE level (see PS64), which is a lot
3562 * more flexible than the above. With this the 2M restriction was
3563 * dropped where we now only require 64K.
3568 * @handle: Returned handle for the object.
3570 * Object handles are nonzero.
	/**
	 * @flags: Optional flags.
	 *
	 * Supported values:
	 *
	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
	 * the object will need to be accessed via the CPU.
	 *
	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
	 * strictly required on configurations where some subset of the device
	 * memory is directly visible/mappable through the CPU (which we also
	 * call small BAR), like on some DG2+ systems. Note that this is quite
	 * undesirable, but due to various factors like the client CPU, BIOS etc.
	 * it's something we can expect to see in the wild. See
	 * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
	 * determine if this system applies.
	 *
	 * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
	 * ensure the kernel can always spill the allocation to system memory
	 * if the object can't be allocated in the mappable part of
	 * I915_MEMORY_CLASS_DEVICE.
	 *
	 * Also note that since the kernel only supports flat-CCS on objects
	 * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
	 * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
	 * flat-CCS.
	 *
	 * Without this hint, the kernel will assume that non-mappable
	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
	 * kernel can still migrate the object to the mappable part, as a last
	 * resort, if userspace ever CPU faults this object, but this might be
	 * expensive, and so ideally should be avoided.
	 *
	 * On older kernels which lack the relevant small-bar uAPI support (see
	 * also &drm_i915_memory_region_info.probed_cpu_visible_size),
	 * usage of the flag will result in an error, but it should NEVER be
	 * possible to end up with a small BAR configuration, assuming we can
	 * also successfully load the i915 kernel module. In such cases the
	 * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
	 * such there are zero restrictions on where the object can be placed.
	 */
#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
	__u32 flags;
	/**
	 * @extensions: The chain of extensions to apply to this object.
	 *
	 * This will be useful in the future when we need to support several
	 * different extensions, and we need to apply more than one when
	 * creating the object. See struct i915_user_extension.
	 *
	 * If we don't supply any extensions then we get the same old gem_create
	 * behaviour.
	 *
	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
	 * struct drm_i915_gem_create_ext_memory_regions.
	 *
	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
	 * struct drm_i915_gem_create_ext_protected_content.
	 */
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
	__u64 extensions;
};
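
/*
 * A minimal sketch of combining the flag with placements (assuming a
 * discrete device; see the extension structs documented below): the object
 * is placed in device memory first, with the mandatory
 * I915_MEMORY_CLASS_SYSTEM fallback, and marked as needing CPU access:
 *
 *	struct drm_i915_gem_memory_class_instance placements[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.num_regions = 2,
 *		.regions = (uintptr_t)placements,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 */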
/**
 * struct drm_i915_gem_create_ext_memory_regions - The
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
 *
 * Set the object with the desired set of placements/regions in priority
 * order. Each entry must be unique and supported by the device.
 *
 * This is provided as an array of struct drm_i915_gem_memory_class_instance,
 * or an equivalent layout of class:instance pair encodings. See struct
 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
 * query the supported regions for a device.
 *
 * As an example, on discrete devices, if we wish to set the placement as
 * device local-memory, we can do something like:
 *
 *	struct drm_i915_gem_memory_class_instance region_lmem = {
 *		.memory_class = I915_MEMORY_CLASS_DEVICE,
 *		.memory_instance = 0,
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
 * along with the final object size in &drm_i915_gem_create_ext.size, which
 * should account for any rounding up, if required.
 *
 * Note that userspace has no means of knowing the current backing region
 * for objects where @num_regions is larger than one. The kernel will only
 * ensure that the priority order of the @regions array is honoured, either
 * when initially placing the object, or when moving memory around due to
 * memory pressure.
 *
 * On Flat-CCS capable HW, compression is supported for objects residing in
 * I915_MEMORY_CLASS_DEVICE. If such a (compressed) object also lists another
 * memory class in @regions and is migrated by i915 (due to memory
 * constraints) to a non-I915_MEMORY_CLASS_DEVICE region, i915 would need to
 * decompress the content. However, i915 doesn't have the information
 * required to decompress userspace-compressed objects.
 *
 * Hence, i915 supports Flat-CCS only on objects which can reside solely in
 * I915_MEMORY_CLASS_DEVICE regions.
 */
struct drm_i915_gem_create_ext_memory_regions {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @pad: MBZ */
	__u32 pad;
	/** @num_regions: Number of elements in the @regions array. */
	__u32 num_regions;
	/**
	 * @regions: The regions/placements array.
	 *
	 * An array of struct drm_i915_gem_memory_class_instance.
	 */
	__u64 regions;
};
/**
 * struct drm_i915_gem_create_ext_protected_content - The
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT extension.
 *
 * If this extension is provided, buffer contents are expected to be protected
 * by PXP encryption and require decryption for scan out and processing. This
 * is only possible on platforms that have PXP enabled; in all other scenarios
 * using this extension will cause the ioctl to fail and return -ENODEV. The
 * flags parameter is reserved for future expansion and must currently be set
 * to zero.
 *
 * The buffer contents are considered invalid after a PXP session teardown.
 *
 * The encryption is guaranteed to be processed correctly only if the object
 * is submitted with a context created using the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
 * at submission time on the validity of the objects involved.
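 *
 * For reference, such a context might be created along the following lines
 * (a sketch using the context-creation uAPI defined earlier in this file;
 * note that protected contexts must also not be recoverable):
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&p_protected,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_norecover,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// On success the new context id is in create.ctx_id.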
 *
 * Below is an example of how to create a protected object:
 *
 *	struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *		.flags = 0,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = PAGE_SIZE,
 *		.extensions = (uintptr_t)&protected_ext,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */
struct drm_i915_gem_create_ext_protected_content {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;
	/** @flags: Reserved for future usage, currently MBZ */
	__u32 flags;
};

/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */