2 * Copyright 2013 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Dave Airlie
27 #include "qxl_object.h"
30 * TODO: we currently allocate a new gem (in qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
/*
 * QXL_ALLOC ioctl: create a new gem-backed qxl bo in VRAM for the caller
 * and return its handle in qxl_alloc->handle.
 */
33 static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 struct drm_file *file_priv)
36 struct qxl_device *qdev = dev->dev_private;
37 struct drm_qxl_alloc *qxl_alloc = data;
41 u32 domain = QXL_GEM_DOMAIN_VRAM;
/* reject zero-size allocations up front */
43 if (qxl_alloc->size == 0) {
44 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
53 DRM_ERROR("%s: failed to create gem ret=%d\n",
/* hand the new gem handle back to userspace */
57 qxl_alloc->handle = handle;
/*
 * QXL_MAP ioctl: thin wrapper that resolves the mmap offset for the given
 * gem handle via the dumb-buffer mmap helper.
 */
61 static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file_priv)
64 struct qxl_device *qdev = dev->dev_private;
65 struct drm_qxl_map *qxl_map = data;
67 return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
/*
 * Book-keeping for one relocation: dst_bo is the buffer being patched,
 * src_bo is the buffer whose address (or surface id) gets written into it.
 * src_bo may be NULL (see qxl_process_single_command()).
 */
71 struct qxl_reloc_info {
73 struct qxl_bo *dst_bo;
75 struct qxl_bo *src_bo;
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
/*
 * Patch the 64-bit physical address of the source bo into dst_bo at
 * dst_offset: atomically map the page containing the target, write the
 * qword within that page, then drop the mapping.
 */
85 apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
89 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
90 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
93 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
/*
 * Patch a 32-bit surface id into dst_bo at dst_offset, using the same
 * atomic page-map dance as apply_reloc().
 * NOTE(review): the initial value of 'id' is assigned on a line elided
 * from this view — presumably 0 for the primary surface / missing src_bo.
 */
97 apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
/* only non-primary surfaces carry their own surface_id */
102 if (info->src_bo && !info->src_bo->is_primary)
103 id = info->src_bo->surface_id;
105 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
106 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
107 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
110 /* return holding the reference to this object */
/*
 * Resolve a userspace gem handle to its qxl_bo and add it to the
 * release's validation list.  The lookup reference taken here is dropped
 * after the list add; per the comment above, the caller still ends up
 * holding a reference on success.
 */
111 static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
112 struct qxl_release *release, struct qxl_bo **qbo_p)
114 struct drm_gem_object *gobj;
118 gobj = drm_gem_object_lookup(file_priv, handle);
122 qobj = gem_to_qxl_bo(gobj);
124 ret = qxl_release_list_add(release, qobj);
/* drop the lookup reference now that the bo is tracked on the release */
125 drm_gem_object_put_unlocked(gobj);
134 * Usage of execbuffer:
135 * Relocations need to take into account the full QXLDrawable size.
136 * However, the command as passed from user space must *not* contain the initial
137 * QXLReleaseInfo struct (first XXX bytes)
/*
 * Validate one userspace drm_qxl_command, copy its body into a freshly
 * allocated release bo, resolve and apply its relocations, then push the
 * command onto the device ring.
 */
139 static int qxl_process_single_command(struct qxl_device *qdev,
140 struct drm_qxl_command *cmd,
141 struct drm_file *file_priv)
143 struct qxl_reloc_info *reloc_info;
145 struct qxl_release *release;
146 struct qxl_bo *cmd_bo;
148 int i, ret, num_relocs;
/* map the userspace command type onto a release type; only draw
 * commands are accepted through execbuffer */
153 release_type = QXL_RELEASE_DRAWABLE;
155 case QXL_CMD_SURFACE:
158 DRM_DEBUG("Only draw commands in execbuffers\n");
/* the command must fit in one page alongside the release info header */
163 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
166 if (!access_ok(VERIFY_READ,
167 u64_to_user_ptr(cmd->command),
171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL)
176 ret = qxl_alloc_release_reserved(qdev,
177 sizeof(union qxl_release_info) +
185 /* TODO copy slow path code from i915 */
/* copy the command body from userspace into the mapped command bo,
 * just past the release info header */
186 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
187 unwritten = __copy_from_user_inatomic_nocache
188 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
189 u64_to_user_ptr(cmd->command), cmd->command_size);
192 struct qxl_drawable *draw = fb_cmd;
/* stamp drawables with the current device mm clock */
194 draw->mm_time = qdev->rom->mm_clock;
197 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
/* a non-zero 'unwritten' means the atomic user copy faulted */
199 DRM_ERROR("got unwritten %d\n", unwritten);
201 goto out_free_release;
204 /* fill out reloc info structs */
206 for (i = 0; i < cmd->relocs_num; ++i) {
207 struct drm_qxl_reloc reloc;
208 struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
210 if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
215 /* add the bos to the list of bos to validate -
216 need to validate first then process relocs? */
217 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
218 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
223 reloc_info[i].type = reloc.reloc_type;
/* a zero dst_handle means the destination is the command bo itself,
 * offset past the release info */
225 if (reloc.dst_handle) {
226 ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
227 &reloc_info[i].dst_bo);
230 reloc_info[i].dst_offset = reloc.dst_offset;
232 reloc_info[i].dst_bo = cmd_bo;
233 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
237 /* reserve and validate the reloc dst bo */
238 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
239 ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
240 &reloc_info[i].src_bo);
243 reloc_info[i].src_offset = reloc.src_offset;
245 reloc_info[i].src_bo = NULL;
246 reloc_info[i].src_offset = 0;
250 /* validate all buffers */
251 ret = qxl_release_reserve_list(release, false);
/* buffers are resident: patch in physical addresses / surface ids */
255 for (i = 0; i < cmd->relocs_num; ++i) {
256 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
257 apply_reloc(qdev, &reloc_info[i]);
258 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
259 apply_surf_reloc(qdev, &reloc_info[i]);
/* submit to the device ring; back off the reservation on failure,
 * otherwise fence the validated buffer objects */
262 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
264 qxl_release_backoff_reserve_list(release);
266 qxl_release_fence_buffer_objects(release);
271 qxl_release_free(qdev, release);
/*
 * QXL_EXECBUFFER ioctl: copy each drm_qxl_command descriptor from
 * userspace and submit it through qxl_process_single_command().
 */
277 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
278 struct drm_file *file_priv)
280 struct qxl_device *qdev = dev->dev_private;
281 struct drm_qxl_execbuffer *execbuffer = data;
282 struct drm_qxl_command user_cmd;
286 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
288 struct drm_qxl_command __user *commands =
289 u64_to_user_ptr(execbuffer->commands);
/* copy only the descriptor here; the command body is copied later */
291 if (copy_from_user(&user_cmd, commands + cmd_num,
295 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
/*
 * QXL_UPDATE_AREA ioctl: ask the device to refresh a rectangle of a
 * surface bo.  The bo is reserved and, if not pinned, validated back into
 * its placement before the update command is issued.
 */
302 static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
303 struct drm_file *file)
305 struct qxl_device *qdev = dev->dev_private;
306 struct drm_qxl_update_area *update_area = data;
307 struct qxl_rect area = {.left = update_area->left,
308 .top = update_area->top,
309 .right = update_area->right,
310 .bottom = update_area->bottom};
312 struct drm_gem_object *gobj = NULL;
313 struct qxl_bo *qobj = NULL;
314 struct ttm_operation_ctx ctx = { true, false };
/* reject empty or inverted rectangles */
316 if (update_area->left >= update_area->right ||
317 update_area->top >= update_area->bottom)
320 gobj = drm_gem_object_lookup(file, update_area->handle);
324 qobj = gem_to_qxl_bo(gobj);
326 ret = qxl_bo_reserve(qobj, false);
/* an unpinned bo may have been evicted; validate it back in place */
330 if (!qobj->pin_count) {
331 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
332 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
/* make sure the bo has a surface id before talking to the device */
337 ret = qxl_bo_check_id(qdev, qobj);
340 if (!qobj->surface_id)
341 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
342 ret = qxl_io_update_area(qdev, qobj, &area);
345 qxl_bo_unreserve(qobj);
348 drm_gem_object_put_unlocked(gobj);
/*
 * QXL_GETPARAM ioctl: report driver/device constants to userspace
 * (surface count from the device ROM, max relocations per command).
 */
352 static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
353 struct drm_file *file_priv)
355 struct qxl_device *qdev = dev->dev_private;
356 struct drm_qxl_getparam *param = data;
358 switch (param->param) {
359 case QXL_PARAM_NUM_SURFACES:
360 param->value = qdev->rom->n_surfaces;
362 case QXL_PARAM_MAX_RELOCS:
363 param->value = QXL_MAX_RES;
/*
 * QXL_CLIENTCAP ioctl: test a single bit in the device ROM's
 * client-capabilities bitmap.  Devices with PCI revision < 4 are
 * special-cased (they predate capability reporting).
 */
371 static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
372 struct drm_file *file_priv)
374 struct qxl_device *qdev = dev->dev_private;
375 struct drm_qxl_clientcap *param = data;
/* locate the byte and the bit within it for the requested index */
378 byte = param->index / 8;
379 idx = param->index % 8;
381 if (dev->pdev->revision < 4)
387 if (qdev->rom->client_capabilities[byte] & (1 << idx))
/*
 * QXL_ALLOC_SURF ioctl: allocate a surface-domain bo sized from the
 * requested stride and height and hand back a gem handle for it.
 */
392 static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
393 struct drm_file *file)
395 struct qxl_device *qdev = dev->dev_private;
396 struct drm_qxl_alloc_surf *param = data;
400 int size, actual_stride;
401 struct qxl_surface surf;
403 /* work out size allocate bo with handle */
/* stride may be negative (bottom-up surface); size on |stride| with one
 * extra stride of slack */
404 actual_stride = param->stride < 0 ? -param->stride : param->stride;
405 size = actual_stride * param->height + actual_stride;
407 surf.format = param->format;
408 surf.width = param->width;
409 surf.height = param->height;
410 surf.stride = param->stride;
413 ret = qxl_gem_object_create_with_handle(qdev, file,
414 QXL_GEM_DOMAIN_SURFACE,
419 DRM_ERROR("%s: failed to create gem ret=%d\n",
423 param->handle = handle;
/*
 * ioctl dispatch table for the qxl driver; the entries visible here are
 * gated on DRM_AUTH (authenticated clients only).
 */
427 const struct drm_ioctl_desc qxl_ioctls[] = {
428 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
430 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
432 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
434 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
436 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
438 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
441 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
/* number of entries, exported for the drm_driver num_ioctls field */
445 int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);