// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include <drm/drm_fourcc.h>

#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
static u32 object_max_page_size(struct intel_memory_region **placements,
                                unsigned int n_placements)
{
        u32 max_page_size = 0;
        int i;

        for (i = 0; i < n_placements; i++) {
                struct intel_memory_region *mr = placements[i];

                GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
                max_page_size = max_t(u32, max_page_size, mr->min_page_size);
        }

        GEM_BUG_ON(!max_page_size);
        return max_page_size;
}
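/*
 * Example (illustrative region values): with placements { SMEM, LMEM }
 * where SMEM uses a 4K and LMEM a 64K min_page_size, the result is 64K,
 * so __i915_gem_object_create_user_ext() below rounds even a one-page
 * request up to a single 64K object.
 */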
static int object_set_placements(struct drm_i915_gem_object *obj,
                                 struct intel_memory_region **placements,
                                 unsigned int n_placements)
{
        struct intel_memory_region **arr;
        unsigned int i;

        GEM_BUG_ON(!n_placements);

        /*
         * For the common case of one memory region, skip storing an
         * allocated array and just point at the region directly.
         */
        if (n_placements == 1) {
                struct intel_memory_region *mr = placements[0];
                struct drm_i915_private *i915 = mr->i915;

                obj->mm.placements = &i915->mm.regions[mr->id];
                obj->mm.n_placements = 1;
        } else {
                arr = kmalloc_array(n_placements,
                                    sizeof(struct intel_memory_region *),
                                    GFP_KERNEL);
                if (!arr)
                        return -ENOMEM;

                for (i = 0; i < n_placements; i++)
                        arr[i] = placements[i];

                obj->mm.placements = arr;
                obj->mm.n_placements = n_placements;
        }

        return 0;
}
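/*
 * Note: in the single-placement case above, obj->mm.placements points at
 * the driver's own i915->mm.regions[] slot rather than at a kmalloc'd
 * array, which is why the error and free paths below only kfree() the
 * placement list when n_placements > 1.
 */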
static int i915_gem_publish(struct drm_i915_gem_object *obj,
                            struct drm_file *file,
                            u64 *size_p,
                            u32 *handle_p)
{
        u64 size = obj->base.size;
        int ret;

        ret = drm_gem_handle_create(file, &obj->base, handle_p);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *size_p = size;
        return 0;
}
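/*
 * Note: i915_gem_publish() consumes the allocation reference
 * unconditionally. On success the new handle holds the only remaining
 * reference; on failure the put above frees the object. Either way the
 * caller must not dereference obj after this call, which is why the size
 * is sampled into a local before drm_gem_handle_create().
 */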
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
                                  struct intel_memory_region **placements,
                                  unsigned int n_placements,
                                  unsigned int ext_flags)
{
        struct intel_memory_region *mr = placements[0];
        struct drm_i915_gem_object *obj;
        unsigned int flags;
        int ret;

        i915_gem_flush_free_objects(i915);

        size = round_up(size, object_max_page_size(placements, n_placements));
        if (size == 0)
                return ERR_PTR(-EINVAL);

        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        if (i915_gem_object_size_2big(size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                return ERR_PTR(-ENOMEM);

        ret = object_set_placements(obj, placements, n_placements);
        if (ret)
                goto object_free;

        /*
         * I915_BO_ALLOC_USER will make sure the object is cleared before
         * any user access.
         */
        flags = I915_BO_ALLOC_USER;

        ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
        if (ret)
                goto object_free;

        GEM_BUG_ON(size != obj->base.size);

        /* Add any flag set by create_ext options */
        obj->flags |= ext_flags;

        trace_i915_gem_object_create(obj);
        return obj;

object_free:
        if (obj->mm.n_placements > 1)
                kfree(obj->mm.placements);
        i915_gem_object_free(obj);
        return ERR_PTR(ret);
}
/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking. It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
                              struct intel_memory_region **placements,
                              unsigned int n_placements)
{
        return __i915_gem_object_create_user_ext(i915, size, placements,
                                                 n_placements, 0);
}
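/*
 * Illustrative caller (a sketch of the selftest-style usage; the size and
 * region choice here are assumptions, not taken from this file):
 *
 *	struct intel_memory_region *mr = i915->mm.regions[INTEL_REGION_SMEM];
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = __i915_gem_object_create_user(i915, SZ_64K, &mr, 1);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */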
int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        if (args->pitch < args->width)
                return -EINVAL;

        args->size = mul_u32_u32(args->pitch, args->height);

        mem_type = INTEL_MEMORY_SYSTEM;
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;

        mr = intel_memory_region_by_type(to_i915(dev), mem_type);

        obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}
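/*
 * Worked example: a 1920x1080 dumb buffer at 32 bpp gives cpp = 4,
 * args->pitch = ALIGN(1920 * 4, 64) = 7680 bytes, and
 * args->size = 7680 * 1080 = 8294400 bytes, which the create path then
 * rounds up to the chosen region's minimum page size.
 */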
/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;

        mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

        obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}
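/*
 * Illustrative userspace usage (a sketch, not part of this file;
 * use_handle() is a placeholder). Names come from the uapi header
 * include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */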
struct create_ext {
        struct drm_i915_private *i915;
        struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
        unsigned int n_placements;
        unsigned int placement_mask;
        unsigned long flags;
};
static void repr_placements(char *buf, size_t size,
                            struct intel_memory_region **placements,
                            int n_placements)
{
        int i;

        buf[0] = '\0';

        for (i = 0; i < n_placements; i++) {
                struct intel_memory_region *mr = placements[i];
                int r;

                r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
                             mr->name, mr->type, mr->instance);
                if (r >= size)
                        return;

                buf += r;
                size -= r;
        }
}
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
                          struct create_ext *ext_data)
{
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_memory_class_instance __user *uregions =
                u64_to_user_ptr(args->regions);
        struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
        u32 mask;
        int i, ret = 0;

        if (args->pad) {
                drm_dbg(&i915->drm, "pad should be zero\n");
                ret = -EINVAL;
        }

        if (!args->num_regions) {
                drm_dbg(&i915->drm, "num_regions is zero\n");
                ret = -EINVAL;
        }

        BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
        BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
        if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
                drm_dbg(&i915->drm, "num_regions is too large\n");
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        mask = 0;
        for (i = 0; i < args->num_regions; i++) {
                struct drm_i915_gem_memory_class_instance region;
                struct intel_memory_region *mr;

                if (copy_from_user(&region, uregions, sizeof(region)))
                        return -EFAULT;

                mr = intel_memory_region_lookup(i915,
                                                region.memory_class,
                                                region.memory_instance);
                if (!mr || mr->private) {
                        drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
                                region.memory_class, region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                if (mask & BIT(mr->id)) {
                        drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
                                mr->name, region.memory_class,
                                region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                placements[i] = mr;
                mask |= BIT(mr->id);

                ++uregions;
        }

        if (ext_data->n_placements) {
                ret = -EINVAL;
                goto out_dump;
        }

        ext_data->n_placements = args->num_regions;
        for (i = 0; i < args->num_regions; i++)
                ext_data->placements[i] = placements[i];

        ext_data->placement_mask = mask;
        return 0;

out_dump:
        if (1) {
                char buf[256];

                if (ext_data->n_placements) {
                        repr_placements(buf,
                                        sizeof(buf),
                                        ext_data->placements,
                                        ext_data->n_placements);
                        drm_dbg(&i915->drm,
                                "Placements were already set in previous EXT. Existing placements: %s\n",
                                buf);
                }

                repr_placements(buf, sizeof(buf), placements, i);
                drm_dbg(&i915->drm, "New placements (so far validated): %s\n", buf);
        }

        return ret;
}
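/*
 * The BIT(mr->id) mask built above does double duty: it rejects duplicate
 * regions while parsing, and, stored as ext_data->placement_mask, it later
 * lets i915_gem_create_ext_ioctl() verify that SMEM is among the requested
 * placements.
 */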
static int ext_set_placements(struct i915_user_extension __user *base,
                              void *data)
{
        struct drm_i915_gem_create_ext_memory_regions ext;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        return set_placements(&ext, data);
}
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
        struct drm_i915_gem_create_ext_protected_content ext;
        struct create_ext *ext_data = data;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        if (ext.flags)
                return -EINVAL;

        if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
                return -ENODEV;

        ext_data->flags |= I915_BO_PROTECTED;

        return 0;
}
static const i915_user_extension_fn create_extensions[] = {
        [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
        [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};
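/*
 * Illustrative userspace usage of the extension chain (a sketch, not part
 * of this file; the size and region choices are assumptions). Struct and
 * token names come from include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_memory_class_instance regions[] = {
 *		{ I915_MEMORY_CLASS_DEVICE, 0 },
 *		{ I915_MEMORY_CLASS_SYSTEM, 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions mregions = {
 *		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
 *		.num_regions = 2,
 *		.regions = (uintptr_t)regions,
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = 2 * 1024 * 1024,
 *		.extensions = (uintptr_t)&mregions,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 */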
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create_ext *args = data;
        struct create_ext ext_data = { .i915 = i915 };
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
                return -EINVAL;

        ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   create_extensions,
                                   ARRAY_SIZE(create_extensions),
                                   &ext_data);
        if (ret)
                return ret;

        if (!ext_data.n_placements) {
                ext_data.placements[0] =
                        intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
                ext_data.n_placements = 1;
        }

        if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
                if (ext_data.n_placements == 1)
                        return -EINVAL;

                /*
                 * We always need to be able to spill to system memory, if we
                 * can't place in the mappable part of LMEM.
                 */
                if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
                        return -EINVAL;
        } else {
                if (ext_data.n_placements > 1 ||
                    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
                        ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
        }

        obj = __i915_gem_object_create_user_ext(i915, args->size,
                                                ext_data.placements,
                                                ext_data.n_placements,
                                                ext_data.flags);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}