1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include <drm/drm_fourcc.h>
8 #include "display/intel_display.h"
9 #include "gem/i915_gem_ioctls.h"
10 #include "gem/i915_gem_lmem.h"
11 #include "gem/i915_gem_region.h"
12 #include "pxp/intel_pxp.h"
15 #include "i915_gem_create.h"
16 #include "i915_trace.h"
17 #include "i915_user_extensions.h"
/*
 * object_max_page_size - largest min_page_size across the candidate
 * placement regions, so the object size can be rounded up to a value
 * every region in the list is able to back.
 * @placements: candidate memory regions, in priority order
 * @n_placements: number of entries in @placements
 *
 * NOTE(review): this view of the file has lines elided (loop-index
 * declaration, return statement, closing braces); presumably the
 * function returns max_page_size -- confirm against the full source.
 */
19 static u32 object_max_page_size(struct intel_memory_region **placements,
20 unsigned int n_placements)
22 u32 max_page_size = 0;
25 for (i = 0; i < n_placements; i++) {
26 struct intel_memory_region *mr = placements[i];
/* every region's minimum page size is required to be a power of two */
28 GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
29 max_page_size = max_t(u32, max_page_size, mr->min_page_size);
/* caller must supply at least one placement */
32 GEM_BUG_ON(!max_page_size);
/*
 * object_set_placements - record the list of allowed memory regions on
 * an object.
 * @obj: object being configured
 * @placements: allowed memory regions, in priority order
 * @n_placements: number of entries in @placements (must be non-zero)
 *
 * NOTE(review): lines are elided in this view -- the kmalloc_array()
 * GFP flags argument, its NULL check, and the return statements are not
 * visible; presumably returns 0 on success and -ENOMEM on allocation
 * failure -- confirm against the full source.
 */
36 static int object_set_placements(struct drm_i915_gem_object *obj,
37 struct intel_memory_region **placements,
38 unsigned int n_placements)
40 struct intel_memory_region **arr;
43 GEM_BUG_ON(!n_placements);
46 * For the common case of one memory region, skip storing an
47 * allocated array and just point at the region directly.
49 if (n_placements == 1) {
50 struct intel_memory_region *mr = placements[0];
51 struct drm_i915_private *i915 = mr->i915;
/* point at the device's canonical region slot instead of allocating */
53 obj->mm.placements = &i915->mm.regions[mr->id];
54 obj->mm.n_placements = 1;
/* multi-region case: copy the caller's list into a private array */
56 arr = kmalloc_array(n_placements,
57 sizeof(struct intel_memory_region *),
62 for (i = 0; i < n_placements; i++)
63 arr[i] = placements[i];
65 obj->mm.placements = arr;
66 obj->mm.n_placements = n_placements;
/*
 * i915_gem_publish - expose a freshly created object to userspace by
 * creating a GEM handle for it.
 * @obj: object to publish (creation reference is consumed here)
 * @file: drm file the handle belongs to
 *
 * The creation reference is dropped unconditionally after
 * drm_gem_handle_create(); on success the handle keeps the object
 * alive.  NOTE(review): the size_p/handle_p parameter lines and the
 * writeback of *size_p are elided in this view -- confirm against the
 * full source.
 */
72 static int i915_gem_publish(struct drm_i915_gem_object *obj,
73 struct drm_file *file,
77 u64 size = obj->base.size;
80 ret = drm_gem_handle_create(file, &obj->base, handle_p);
81 /* drop reference from allocate - handle holds it now */
82 i915_gem_object_put(obj);
/*
 * __i915_gem_object_create_user_ext - common creation path shared by
 * the CREATE_EXT ioctl and selftests.
 * @i915: device private
 * @size: requested size in bytes (rounded up to the max min_page_size
 *        of the placements, and must end up page-aligned)
 * @placements: allowed memory regions, in priority order; the object is
 *              initialised against placements[0]
 * @n_placements: number of entries in @placements
 * @ext_flags: extra I915_BO_* flags collected from create extensions
 *
 * NOTE(review): several lines (overflow check after round_up, error
 * unwind labels, final return) are elided in this view -- the visible
 * cleanup path frees the placement array only when more than one entry
 * was stored, matching object_set_placements()' single-region fast
 * path.  Confirm the full unwind ordering against the complete source.
 */
90 static struct drm_i915_gem_object *
91 __i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
92 struct intel_memory_region **placements,
93 unsigned int n_placements,
94 unsigned int ext_flags)
96 struct intel_memory_region *mr = placements[0];
97 struct drm_i915_gem_object *obj;
/* flush pending frees so freed memory is reusable for this allocation */
101 i915_gem_flush_free_objects(i915);
103 size = round_up(size, object_max_page_size(placements, n_placements));
105 return ERR_PTR(-EINVAL);
107 /* For most of the ABI (e.g. mmap) we think in system pages */
108 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
110 if (i915_gem_object_size_2big(size))
111 return ERR_PTR(-E2BIG);
113 obj = i915_gem_object_alloc();
115 return ERR_PTR(-ENOMEM);
117 ret = object_set_placements(obj, placements, n_placements);
122 * I915_BO_ALLOC_USER will make sure the object is cleared before
125 flags = I915_BO_ALLOC_USER;
127 ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
131 GEM_BUG_ON(size != obj->base.size);
133 /* Add any flag set by create_ext options */
134 obj->flags |= ext_flags;
136 trace_i915_gem_object_create(obj);
/* error unwind: only free a placement array we actually allocated */
140 if (obj->mm.n_placements > 1)
141 kfree(obj->mm.placements);
142 i915_gem_object_free(obj);
147 * __i915_gem_object_create_user - Creates a new object using the same path as
148 * DRM_I915_GEM_CREATE_EXT
149 * @i915: i915 private
150 * @size: size of the buffer, in bytes
151 * @placements: possible placement regions, in priority order
152 * @n_placements: number of possible placement regions
154 * This function is exposed primarily for selftests and does very little
155 * error checking. It is assumed that the set of placement regions has
156 * already been verified to be valid.
 *
 * Return: the new object, or an ERR_PTR on failure (propagated from
 * __i915_gem_object_create_user_ext()).
158 struct drm_i915_gem_object *
159 __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
160 struct intel_memory_region **placements,
161 unsigned int n_placements)
/* thin wrapper: no extension flags (the elided final argument is 0) */
163 return __i915_gem_object_create_user_ext(i915, size, placements,
/*
 * i915_gem_dumb_create - DRM "dumb buffer" allocation entry point,
 * used by generic KMS clients for unaccelerated scanout buffers.
 * @file: drm file the resulting handle belongs to
 * @dev: drm device
 * @args: in/out ioctl arguments (width/height/bpp in; pitch/size/handle out)
 *
 * NOTE(review): the bpp switch cases and their range checks are elided
 * in this view; only the format assignments are visible.
 */
168 i915_gem_dumb_create(struct drm_file *file,
169 struct drm_device *dev,
170 struct drm_mode_create_dumb *args)
172 struct drm_i915_gem_object *obj;
173 struct intel_memory_region *mr;
174 enum intel_memory_type mem_type;
/* bytes per pixel, rounding partial bytes up (e.g. bpp=15 -> 2) */
175 int cpp = DIV_ROUND_UP(args->bpp, 8);
180 format = DRM_FORMAT_C8;
183 format = DRM_FORMAT_RGB565;
186 format = DRM_FORMAT_XRGB8888;
192 /* have to work out size/pitch and return them */
193 args->pitch = ALIGN(args->width * cpp, 64);
195 /* align stride to page size so that we can remap */
196 if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
197 DRM_FORMAT_MOD_LINEAR))
198 args->pitch = ALIGN(args->pitch, 4096);
/* overflow guard: pitch arithmetic must not have wrapped below width */
200 if (args->pitch < args->width)
203 args->size = mul_u32_u32(args->pitch, args->height);
/* scanout must come from device-local memory when the device has it */
205 mem_type = INTEL_MEMORY_SYSTEM;
206 if (HAS_LMEM(to_i915(dev)))
207 mem_type = INTEL_MEMORY_LOCAL;
209 mr = intel_memory_region_by_type(to_i915(dev), mem_type);
211 obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
/* publish consumes the creation reference and fills size/handle */
215 return i915_gem_publish(obj, file, &args->size, &args->handle);
219 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
220 * @dev: drm device pointer
221 * @data: ioctl data blob
222 * @file: drm file pointer
 *
 * Return: 0 on success, negative error code on failure.
225 i915_gem_create_ioctl(struct drm_device *dev, void *data,
226 struct drm_file *file)
228 struct drm_i915_private *i915 = to_i915(dev);
229 struct drm_i915_gem_create *args = data;
230 struct drm_i915_gem_object *obj;
231 struct intel_memory_region *mr;
/* legacy ioctl: always allocates from system memory, no extensions */
233 mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
235 obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
/* publish consumes the creation reference and fills size/handle */
239 return i915_gem_publish(obj, file, &args->size, &args->handle);
/* Accumulator for parameters gathered from CREATE_EXT user extensions.
 * NOTE(review): the "struct create_ext {" opener, a flags member and the
 * closing brace are elided in this view of the file.
 */
243 struct drm_i915_private *i915;
/* validated placement list, priority order (set_placements) */
244 struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
/* number of valid entries in placements[]; 0 until an extension sets it */
245 unsigned int n_placements;
/* BIT(mr->id) for each accepted region, used for duplicate/SMEM checks */
246 unsigned int placement_mask;
/* PAT index requested via SET_PAT; PAT_INDEX_NOT_SET when absent */
248 unsigned int pat_index;
/*
 * repr_placements - format a placement list into a human-readable
 * string for debug output.
 * @buf: destination buffer
 * @size: size of @buf in bytes
 * @placements: regions to describe
 *
 * NOTE(review): the n_placements parameter line and the buf/size
 * advance after each snprintf() are elided in this view -- confirm the
 * truncation handling against the full source.
 */
251 static void repr_placements(char *buf, size_t size,
252 struct intel_memory_region **placements,
259 for (i = 0; i < n_placements; i++) {
260 struct intel_memory_region *mr = placements[i];
263 r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
264 mr->name, mr->type, mr->instance);
273 static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
274 struct create_ext *ext_data)
276 struct drm_i915_private *i915 = ext_data->i915;
277 struct drm_i915_gem_memory_class_instance __user *uregions =
278 u64_to_user_ptr(args->regions);
279 struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
284 drm_dbg(&i915->drm, "pad should be zero\n");
288 if (!args->num_regions) {
289 drm_dbg(&i915->drm, "num_regions is zero\n");
293 BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
294 BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
295 if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
296 drm_dbg(&i915->drm, "num_regions is too large\n");
304 for (i = 0; i < args->num_regions; i++) {
305 struct drm_i915_gem_memory_class_instance region;
306 struct intel_memory_region *mr;
308 if (copy_from_user(®ion, uregions, sizeof(region)))
311 mr = intel_memory_region_lookup(i915,
313 region.memory_instance);
314 if (!mr || mr->private) {
315 drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
316 region.memory_class, region.memory_instance, i);
321 if (mask & BIT(mr->id)) {
322 drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
323 mr->name, region.memory_class,
324 region.memory_instance, i);
335 if (ext_data->n_placements) {
340 ext_data->n_placements = args->num_regions;
341 for (i = 0; i < args->num_regions; i++)
342 ext_data->placements[i] = placements[i];
344 ext_data->placement_mask = mask;
351 if (ext_data->n_placements) {
354 ext_data->placements,
355 ext_data->n_placements);
357 "Placements were already set in previous EXT. Existing placements: %s\n",
361 repr_placements(buf, sizeof(buf), placements, i);
362 drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
/*
 * ext_set_placements - i915_user_extension_fn handler for
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS: copy the extension from userspace
 * and hand it to set_placements() for validation.
 * @base: user pointer to the extension struct
 *
 * NOTE(review): the void *data parameter line and the -EFAULT return on
 * copy failure are elided in this view.
 */
368 static int ext_set_placements(struct i915_user_extension __user *base,
371 struct drm_i915_gem_create_ext_memory_regions ext;
373 if (copy_from_user(&ext, base, sizeof(ext)))
376 return set_placements(&ext, data);
/*
 * ext_set_protected - i915_user_extension_fn handler for
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT: request a PXP-protected object.
 * @base: user pointer to the extension struct
 * @data: struct create_ext accumulator for this ioctl
 *
 * Rejected when PXP is not enabled on the device.  NOTE(review): the
 * flags-validation check and the returns between the visible lines are
 * elided in this view.
 */
379 static int ext_set_protected(struct i915_user_extension __user *base, void *data)
381 struct drm_i915_gem_create_ext_protected_content ext;
382 struct create_ext *ext_data = data;
384 if (copy_from_user(&ext, base, sizeof(ext)))
390 if (!intel_pxp_is_enabled(ext_data->i915->pxp))
/* record the request; applied as an object flag at creation time */
393 ext_data->flags |= I915_BO_PROTECTED;
/*
 * ext_set_pat - i915_user_extension_fn handler for
 * I915_GEM_CREATE_EXT_SET_PAT: record a caller-chosen PAT index for the
 * new object.
 * @base: user pointer to the extension struct
 * @data: struct create_ext accumulator for this ioctl
 *
 * Only supported on Xe_LPG (graphics IP 12.70) and newer; the requested
 * index must not exceed the platform's max_pat_index.  NOTE(review):
 * the rsvd-field check and several return statements are elided in this
 * view.
 */
398 static int ext_set_pat(struct i915_user_extension __user *base, void *data)
400 struct create_ext *ext_data = data;
401 struct drm_i915_private *i915 = ext_data->i915;
402 struct drm_i915_gem_create_ext_set_pat ext;
403 unsigned int max_pat_index;
/* catch uAPI struct growth: rsvd must remain the last member */
405 BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
406 offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));
408 /* Limiting the extension only to Xe_LPG and beyond */
409 if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
412 if (copy_from_user(&ext, base, sizeof(ext)))
415 max_pat_index = INTEL_INFO(i915)->max_pat_index;
417 if (ext.pat_index > max_pat_index) {
418 drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
/* stash for i915_gem_create_ext_ioctl() to apply after creation */
423 ext_data->pat_index = ext.pat_index;
/*
 * Dispatch table for i915_user_extensions(): maps each CREATE_EXT
 * extension id to its handler.  Index values are uAPI constants.
 */
428 static const i915_user_extension_fn create_extensions[] = {
429 [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
430 [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
431 [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
/* Sentinel: no SET_PAT extension was supplied (valid PAT indices are small) */
434 #define PAT_INDEX_NOT_SET 0xffff
436 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
437 * @dev: drm device pointer
438 * @data: ioctl data blob
439 * @file: drm file pointer
 *
 * Return: 0 on success, negative error code on failure.
442 i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
443 struct drm_file *file)
445 struct drm_i915_private *i915 = to_i915(dev);
446 struct drm_i915_gem_create_ext *args = data;
447 struct create_ext ext_data = { .i915 = i915 };
448 struct drm_i915_gem_object *obj;
/* NEEDS_CPU_ACCESS is the only flag currently defined for this ioctl */
451 if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
/* must precede extension parsing so ext_set_pat() can overwrite it */
454 ext_data.pat_index = PAT_INDEX_NOT_SET;
455 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
457 ARRAY_SIZE(create_extensions),
/* no MEMORY_REGIONS extension: default to system memory only */
462 if (!ext_data.n_placements) {
463 ext_data.placements[0] =
464 intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
465 ext_data.n_placements = 1;
468 if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
469 if (ext_data.n_placements == 1)
473 * We always need to be able to spill to system memory, if we
474 * can't place in the mappable part of LMEM.
476 if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
/* without CPU access, anything not purely SMEM can be GPU-only */
479 if (ext_data.n_placements > 1 ||
480 ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
481 ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
484 obj = __i915_gem_object_create_user_ext(i915, args->size,
486 ext_data.n_placements,
491 if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
492 i915_gem_object_set_pat_index(obj, ext_data.pat_index);
493 /* Mark pat_index is set by UMD */
494 obj->pat_set_by_user = true;
/* publish consumes the creation reference and fills size/handle */
497 return i915_gem_publish(obj, file, &args->size, &args->handle);