/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>
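
/*
 * All queries follow the same two-pass uAPI pattern: userspace first submits
 * an item with length == 0 and gets back the required buffer size, then
 * repeats the ioctl with data_ptr/length filled in to receive the data.
 * A minimal userspace sketch (illustrative only, not part of this file; the
 * fd, malloc() and error handling are assumed):
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	// item.length = size
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	// buffer is filled
 */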

static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}
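
/*
 * The topology queries return a struct drm_i915_query_topology_info header
 * immediately followed by three bitmask arrays: the slice mask, then one
 * subslice mask of ss_stride bytes per slice, then one EU mask of eu_stride
 * bytes per subslice. The offsets and strides written into the header tell
 * userspace where each array starts.
 */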
static int fill_topology_info(const struct sseu_dev_info *sseu,
			      struct drm_i915_query_item *query_item,
			      const u8 *subslice_mask)
{
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	if (sseu->max_slices == 0)
		return -ENODEV;

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);
	if (ret != 0)
		return ret;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) + slice_length),
			 subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) +
					 slice_length + subslice_length),
			 sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

	if (query_item->flags != 0)
		return -EINVAL;

	return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}
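
/*
 * For DRM_I915_QUERY_GEOMETRY_SUBSLICES the caller packs a
 * struct i915_engine_class_instance into query_item->flags. The query is
 * only supported for render engines on graphics IP version 12.50 and newer.
 */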
static int query_geometry_subslices(struct drm_i915_private *i915,
				    struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu;
	struct intel_engine_cs *engine;
	struct i915_engine_class_instance classinstance;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		return -ENODEV;

	classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

	engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
					  (u8)classinstance.engine_instance);
	if (!engine)
		return -EINVAL;

	if (engine->class != RENDER_CLASS)
		return -EINVAL;

	sseu = &engine->gt->info.sseu;

	return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}
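
/*
 * DRM_I915_QUERY_ENGINE_INFO: report one struct drm_i915_engine_info per
 * user-visible engine. The required length is computed with struct_size(),
 * so userspace can use the usual probe-then-fill two-pass scheme.
 */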
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register values.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}
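
/*
 * Either report the number of registers (when the user asked for zero) or
 * write kernel_n_regs (mmio offset, value) pairs of u32s to user_regs_ptr.
 */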
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}
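
/*
 * DRM_I915_QUERY_PERF_CONFIG with DATA_FOR_UUID or DATA_FOR_ID: look up one
 * OA configuration (by UUID string or by u64 id) and copy its boolean, flex
 * and mux register lists back to userspace, or only report the register
 * counts when the user-supplied counts are zero.
 */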
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}
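
/*
 * DRM_I915_QUERY_PERF_CONFIG_LIST: return the number of OA configs followed
 * by their u64 ids. Id 1 is reserved for the kernel's test configuration,
 * and the snapshot loop below retries if configs are added while counting.
 */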
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}
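
/*
 * DRM_I915_QUERY_MEMORY_REGIONS: one struct drm_i915_memory_region_info per
 * user-visible memory region, reporting the probed size and an estimate of
 * the space still unallocated. Regions marked private are not reported.
 */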
static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;
		info.unallocated_size = mr->avail;

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}

static int query_hwconfig_blob(struct drm_i915_private *i915,
			       struct drm_i915_query_item *query_item)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

	if (!hwconfig->size || !hwconfig->ptr)
		return -ENODEV;

	if (query_item->length == 0)
		return hwconfig->size;

	if (query_item->length < hwconfig->size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 hwconfig->ptr, hwconfig->size))
		return -EFAULT;

	return hwconfig->size;
}
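
/*
 * Query functions are indexed by query_id - 1, so the order of this table
 * must match the DRM_I915_QUERY_* definitions in uapi/drm/i915_drm.h.
 */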
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
	query_hwconfig_blob,
	query_geometry_subslices,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EFAULT;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}