// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var
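
/*
 * Usage sketch: VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader) declares
 * 'cmd' as a pointer to an anonymous struct consisting of a SVGA3dCmdHeader
 * followed by a SVGA3dCmdSetShader body, matching the wire layout of a
 * header-prefixed SVGA3D command.
 */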

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  =      0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
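
/*
 * Stub verifiers: vmw_cmd_invalid rejects commands that may never come
 * from user-space, and vmw_cmd_ok accepts commands that need no checking.
 */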
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * point.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}
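
/*
 * Validate a set-render-target command: checks the render target type, puts
 * the context and target surface on the validation list and, with
 * guest-backed objects, tracks the render target binding in the context's
 * staged binding state.
 */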
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
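
/* Validate a surface-copy command: the source is read, the destination is dirtied. */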
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
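
/* Validate a DX buffer-copy command: the source is read, the destination is dirtied. */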
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
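
/* Validate a DX predicated copy-region command: srcSid is read, dstSid is dirtied. */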
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}
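
/* Validate a surface stretch-blt command: the source is read, the destination is dirtied. */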
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
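
/* Validate a surface-to-screen blit: only the source surface needs checking. */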
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
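
/* Validate a present command: puts the presented surface on the validation list. */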
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	ttm_bo_put(&vmw_bo->base);
	drm_gem_object_put(&vmw_bo->base.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	ttm_bo_put(&vmw_bo->base);
	drm_gem_object_put(&vmw_bo->base.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
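
/*
 * Validate a surface-DMA command: checks the DMA suffix, clamps the transfer
 * to the backing buffer object's size, dirties the surface on writes to host
 * VRAM and lets the kms code snoop cursor data.
 */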
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
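
/*
 * Validate a draw-primitives command: bounds-checks the vertex declaration
 * and index range arrays against the command size and puts each referenced
 * surface on the validation list.
 */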
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
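
/*
 * Validate a set-texture-state command: for each texture binding, checks the
 * sampler unit and the bound surface, and tracks the binding when
 * guest-backed objects are in use.
 */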
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}
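
/* Validate a define-GMRFB command by translating the guest pointer it carries. */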
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_validation_res_switch_backup with a different interface.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
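
/*
 * In the compat shader path above, the user-visible shader id is private to
 * the kernel's per-context shader manager, so a vmw_res_rel_normal relocation
 * is queued against &cmd->body.shid to patch the real device id into the
 * command stream once validation has assigned it.
 */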

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
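
/*
 * On guest-backed devices the legacy SET_SHADER_CONST command is translated
 * in place: patching header->id to SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE
 * reuses the untouched payload while presenting a command the device
 * actually accepts.
 */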

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
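
/*
 * The range check above deliberately widens startView and the view count to
 * u64 before summing, so that a startView near U32_MAX cannot wrap the
 * addition and slip past the SVGA3D_DX_MAX_SRVIEWS bound.
 */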

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
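
/*
 * This is the general pattern for commands carrying a trailing flexible
 * array: the element count is derived from the size in the command header
 * minus the fixed body, and count plus start slot is bounds-checked in
 * 64-bit arithmetic before any element is dereferenced.
 */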

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR(res))
		return PTR_ERR(res);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}
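
/*
 * The anonymous union above lets a single checker serve the readback,
 * invalidate and update subresource commands; the BUILD_BUG_ON()s prove at
 * compile time that the surface id lives at the same offset in each body,
 * so &cmd->sid is valid whichever of the three commands arrived.
 */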

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
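
/*
 * Binding a shader to a mob is handled as a backup-buffer switch: the shader
 * resource itself is unchanged and only the mob id and offset recorded in
 * the command are validated and patched by vmw_cmd_res_switch_backup().
 */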

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);

	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return 0;
}
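
/*
 * Besides validating each view id, the splice index is recorded in the
 * staged binding state so that the binding tracker can re-emit the complete
 * UAV set when context bindings are rebuilt.
 */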

static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return 0;
}

static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput with mob
	 * command is not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With an SM5 capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput with mob
	 * command is not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With an SM5 capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return 0;
}

static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
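
/*
 * Each VMW_CMD_DEF() entry below pairs an SVGA3D command id with its checker
 * function and three flags, in order: user_allow (the command may come from
 * an unprivileged client), gb_disable (the command is rejected when
 * guest-backed objects are in use) and gb_enable (the command requires
 * guest-backed objects). vmw_cmd_check() enforces these flags.
 */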

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
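
/*
 * By the time relocations are applied, every buffer object has reached its
 * final placement, so the guest addresses recorded during validation can be
 * patched into the command stream: VRAM placements become byte offsets into
 * the framebuffer GMR, while GMR and MOB placements translate to their
 * respective ids.
 */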

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
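
/*
 * The bounce buffer grows geometrically, by half its current size and page
 * aligned, until it covers the incoming batch, keeping reallocations rare.
 * The old contents never need to be preserved, so the buffer is simply
 * freed and reallocated rather than copied.
 */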

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}
3848 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3850 * @dev_priv: Pointer to a vmw_private struct.
3851 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3852 * @ret: Return value from fence object creation.
3853 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3854 * the information should be copied.
3855 * @fence: Pointer to the fenc object.
3856 * @fence_handle: User-space fence handle.
3857 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3859 * This function copies fence information to user-space. If copying fails, the
3860 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3861 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3862 * will hopefully be detected.
3864 * Also if copying fails, user-space will be unable to signal the fence object
3865 * so we wait for it immediately, and then unreference the user-space reference.
static int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}

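/*
 * User-space side of the -EFAULT convention (illustrative sketch, not
 * compiled; assumes libdrm and the drm/vmwgfx_drm.h uapi header). The caller
 * pre-seeds fence_rep.error with -EFAULT; if the kernel's copy_to_user()
 * fails, the field keeps that value and the lost fence is detected.
 */
#if 0
	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
	struct drm_vmw_execbuf_arg arg = {
		/* ... commands, command_size, version, ... */
		.fence_rep = (unsigned long) &fence_rep,
	};

	if (drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
	    fence_rep.error == -EFAULT) {
		/* The kernel never wrote the fence info back; do not use it. */
	}
#endif
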
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

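/*
 * The reserve/copy/commit pattern used above, in isolation (illustrative
 * sketch, not compiled; commands and size are assumed caller-provided).
 * VMW_CMD_RESERVE() hands back a mapping of the requested number of bytes of
 * fifo space, the caller fills it in, and vmw_cmd_commit() makes the
 * commands visible to the device.
 */
#if 0
	void *fifo = VMW_CMD_RESERVE(dev_priv, size);

	if (!fifo)
		return -ENOMEM;
	memcpy(fifo, commands, size);
	vmw_cmd_commit(dev_priv, size);
#endif
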
/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: If command buffers could not be used, the function will
 * return the value of @kernel_commands on function call. That value may be
 * NULL. In that case, the value of *@header will be set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

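/*
 * Caller's view of vmw_execbuf_cmdbuf() (illustrative sketch, not compiled):
 * the return value is tri-state, which is why vmw_execbuf_process() below
 * checks IS_ERR() first and then distinguishes the paths via *header.
 */
#if 0
	struct vmw_cmdbuf_header *header;
	void *cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, NULL,
					command_size, &header);

	if (IS_ERR(cmds)) {
		/* Error, e.g. -EINVAL, -EFAULT or -ERESTARTSYS. */
		return PTR_ERR(cmds);
	} else if (header) {
		/* Command buffer path: cmds points at the copied batch. */
	} else {
		/* cmds equals the passed-in kernel_commands (NULL here), so
		 * the caller falls back to the bounce-buffer fifo path. */
	}
#endif
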
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() syncs the fifo. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* Usercopy of fence failed, put the file object. */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier. */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out. */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

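/*
 * In-kernel caller sketch (illustrative, not compiled): passing @out_fence
 * transfers the fence reference to the caller, per the "don't unreference
 * when handing fence out" rule above. file_priv, kernel_commands and size
 * are assumed to be provided by the caller; SVGA3D_INVALID_ID means no DX
 * context is tied to the submission.
 */
#if 0
	struct vmw_fence_obj *fence = NULL;
	int ret;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, kernel_commands,
				  size, 0, SVGA3D_INVALID_ID, NULL, &fence, 0);
	if (ret == 0 && fence != NULL) {
		/* The caller now owns the fence reference. */
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
		vmw_fence_obj_unreference(&fence);
	}
#endif
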
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM will have correctly copied it. */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}
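
/*
 * User-space view of fence-fd import (illustrative sketch, not compiled;
 * assumes libdrm and the drm/vmwgfx_drm.h uapi header, with cmds, size, fd,
 * fence_fd and ctx_handle provided by the caller). The ioctl above waits on
 * the imported sync_file fence before processing the batch.
 */
#if 0
	struct drm_vmw_execbuf_arg arg = {
		.commands = (unsigned long) cmds,
		.command_size = size,
		.version = DRM_VMW_EXECBUF_VERSION,
		.flags = DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD,
		.imported_fence_fd = fence_fd,
		.context_handle = ctx_handle,
	};

	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
#endif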