/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
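/*
 * Common entry point for the length handshake shared by all queries: a
 * zero query_item->length reports the size userspace must allocate, a
 * length shorter than required is rejected, and otherwise the fixed-size
 * header is copied in from userspace for validation.
 */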
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}
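/*
 * The topology query writes a struct drm_i915_query_topology_info header
 * followed by three packed mask arrays: the slice mask, then one
 * ss_stride-sized subslice mask per slice, then one eu_stride-sized EU
 * mask per (slice, subslice) pair. The offsets and strides stored in the
 * header tell userspace where each array begins.
 */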
static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ret;

	if (query_item->flags != 0)
		return -EINVAL;

	if (sseu->max_slices == 0)
		return -ENODEV;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * sseu->ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length,
			      query_item);
	if (ret != 0)
		return ret;

	if (topo.flags != 0)
		return -EINVAL;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = sseu->ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = sseu->eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) + slice_length),
			 sseu->subslice_mask, subslice_length))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
					 sizeof(topo) +
					 slice_length + subslice_length),
			 sseu->eu_mask, eu_length))
		return -EFAULT;

	return total_length;
}
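/*
 * Illustrative userspace flow for this query (a sketch, not part of this
 * file; error handling omitted):
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// probe: item.length is set
 *	item.data_ptr = (uintptr_t)malloc(item.length);
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);	// fill: header + masks
 */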
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
				u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = sizeof(struct drm_i915_query_engine_info) +
	      num_uabi_engines * sizeof(struct drm_i915_engine_info);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.capabilities = engine->uabi_capabilities;

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}
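/*
 * The two helpers below implement the same probe-or-copy convention for
 * the OA register lists: a userspace count of 0 means "just tell me how
 * many registers there are"; any non-zero count must be large enough to
 * hold the kernel's list.
 */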
static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * registers themselves.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}
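/*
 * Each register is written to userspace as a pair of u32s, (mmio offset,
 * value), inside a single user_write_access_begin() section so the
 * unsafe_put_user() calls avoid repeated access_ok() checks.
 */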
static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;

Efault:
	user_write_access_end();
	return -EFAULT;
}
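/*
 * The DATA_FOR_UUID/DATA_FOR_ID queries use a buffer laid out as a
 * struct drm_i915_query_perf_config (which selects the config by uuid or
 * id) immediately followed by a struct drm_i915_perf_oa_config that is
 * filled with the matching configuration.
 */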
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
			  query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}
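/*
 * List sizing counts one extra slot because id 1 is always reserved for
 * the kernel's test configuration (see the "reserved for test_config"
 * entry in query_perf_config_list() below).
 */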
static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
			  query_item->length,
			  sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}
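/*
 * DRM_I915_QUERY_PERF_CONFIG multiplexes three sub-queries through
 * query_item->flags: LIST enumerates config ids, while the two DATA
 * variants return a single config looked up by uuid or by id.
 */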
static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
};
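/*
 * A query_id of N dispatches to i915_query_funcs[N - 1]; for example,
 * DRM_I915_QUERY_TOPOLOGY_INFO (1) selects query_topology_info(). The
 * index is laundered through array_index_nospec() below to keep the
 * user-controlled value from steering speculative execution.
 */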
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}