1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
27 #include <linux/sync_file.h>
29 #include "vmwgfx_drv.h"
30 #include "vmwgfx_reg.h"
31 #include <drm/ttm/ttm_bo_api.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include "vmwgfx_so.h"
34 #include "vmwgfx_binding.h"
35 #include "vmwgfx_mksstat.h"
37 #define VMW_RES_HT_ORDER 12
40 * Helper macro to get dx_ctx_node if available otherwise print an error
41 * message. This is for use in command verifier function where if dx_ctx_node
42 * is not set then command is invalid.
44 #define VMW_GET_CTX_NODE(__sw_context) \
46 __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
47 VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
48 __sw_context->dx_ctx_node; \
52 #define VMW_DECLARE_CMD_VAR(__var, __type) \
54 SVGA3dCmdHeader header; \
59 * struct vmw_relocation - Buffer object relocation
61 * @head: List head for the command submission context's relocation list
62 * @vbo: Non ref-counted pointer to buffer object
63 * @mob_loc: Pointer to location for mob id to be modified
64 * @location: Pointer to location for guest pointer to be modified
66 struct vmw_relocation {
67 struct list_head head;
68 struct vmw_buffer_object *vbo;
71 SVGAGuestPtr *location;
76 * enum vmw_resource_relocation_type - Relocation type for resources
78 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79 * command stream is replaced with the actual id after validation.
80 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
82 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83 * validation is -1, the command is replaced with a NOP. Otherwise no action.
84 * @vmw_res_rel_max: Last value in the enum - used for error checking
86 enum vmw_resource_relocation_type {
94 * struct vmw_resource_relocation - Relocation info for resources
96 * @head: List head for the software context's relocation list.
97 * @res: Non-ref-counted pointer to the resource.
98 * @offset: Offset of single byte entries into the command buffer where the id
99 * that needs fixup is located.
100 * @rel_type: Type of relocation.
102 struct vmw_resource_relocation {
103 struct list_head head;
104 const struct vmw_resource *res;
106 enum vmw_resource_relocation_type rel_type:3;
110 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
112 * @head: List head of context list
113 * @ctx: The context resource
114 * @cur: The context's persistent binding state
115 * @staged: The binding state changes of this command buffer
117 struct vmw_ctx_validation_info {
118 struct list_head head;
119 struct vmw_resource *ctx;
120 struct vmw_ctx_binding_state *cur;
121 struct vmw_ctx_binding_state *staged;
125 * struct vmw_cmd_entry - Describe a command for the verifier
127 * @func: Call-back to handle the command.
128 * @user_allow: Whether allowed from the execbuf ioctl.
129 * @gb_disable: Whether disabled if guest-backed objects are available.
130 * @gb_enable: Whether enabled iff guest-backed objects are available.
131 * @cmd_name: Name of the command.
133 struct vmw_cmd_entry {
134 int (*func) (struct vmw_private *, struct vmw_sw_context *,
139 const char *cmd_name;
142 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
143 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
144 (_gb_disable), (_gb_enable), #_cmd}
146 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
147 struct vmw_sw_context *sw_context,
148 struct vmw_resource *ctx);
149 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
150 struct vmw_sw_context *sw_context,
152 struct vmw_buffer_object **vmw_bo_p);
154 * vmw_ptr_diff - Compute the offset from a to b in bytes
156 * @a: A starting pointer.
157 * @b: A pointer offset in the same address space.
159 * Returns: The offset in bytes between the two pointers.
161 static size_t vmw_ptr_diff(void *a, void *b)
163 return (unsigned long) b - (unsigned long) a;
167 * vmw_execbuf_bindings_commit - Commit modified binding state
169 * @sw_context: The command submission context
170 * @backoff: Whether this is part of the error path and binding state changes
173 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
176 struct vmw_ctx_validation_info *entry;
178 list_for_each_entry(entry, &sw_context->ctx_list, head) {
180 vmw_binding_state_commit(entry->cur, entry->staged);
182 if (entry->staged != sw_context->staged_bindings)
183 vmw_binding_state_free(entry->staged);
185 sw_context->staged_bindings_inuse = false;
188 /* List entries are freed with the validation context */
189 INIT_LIST_HEAD(&sw_context->ctx_list);
193 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
195 * @sw_context: The command submission context
197 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
199 if (sw_context->dx_query_mob)
200 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
201 sw_context->dx_query_mob);
205 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
208 * @dev_priv: Pointer to the device private:
209 * @sw_context: The command submission context
210 * @res: Pointer to the resource
211 * @node: The validation node holding the context resource metadata
213 static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214 struct vmw_sw_context *sw_context,
215 struct vmw_resource *res,
216 struct vmw_ctx_validation_info *node)
220 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 if (unlikely(ret != 0))
224 if (!sw_context->staged_bindings) {
225 sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226 if (IS_ERR(sw_context->staged_bindings)) {
227 ret = PTR_ERR(sw_context->staged_bindings);
228 sw_context->staged_bindings = NULL;
233 if (sw_context->staged_bindings_inuse) {
234 node->staged = vmw_binding_state_alloc(dev_priv);
235 if (IS_ERR(node->staged)) {
236 ret = PTR_ERR(node->staged);
241 node->staged = sw_context->staged_bindings;
242 sw_context->staged_bindings_inuse = true;
246 node->cur = vmw_context_binding_state(res);
247 list_add_tail(&node->head, &sw_context->ctx_list);
256 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
258 * @dev_priv: Pointer to the device private struct.
259 * @res_type: The resource type.
261 * Guest-backed contexts and DX contexts require extra size to store execbuf
262 * private information in the validation node. Typically the binding manager
263 * associated data structures.
265 * Returns: The extra size requirement based on resource type.
267 static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268 enum vmw_res_type res_type)
270 return (res_type == vmw_res_dx_context ||
271 (res_type == vmw_res_context && dev_priv->has_mob)) ?
272 sizeof(struct vmw_ctx_validation_info) : 0;
276 * vmw_execbuf_rcache_update - Update a resource-node cache entry
278 * @rcache: Pointer to the entry to update.
279 * @res: Pointer to the resource.
280 * @private: Pointer to the execbuf-private space in the resource validation
283 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284 struct vmw_resource *res,
288 rcache->private = private;
290 rcache->valid_handle = 0;
294 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
295 * rcu-protected pointer to the validation list.
297 * @sw_context: Pointer to the software context.
298 * @res: Unreferenced rcu-protected pointer to the resource.
299 * @dirty: Whether to change dirty status.
301 * Returns: 0 on success. Negative error code on failure. Typical error codes
302 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
304 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
305 struct vmw_resource *res,
308 struct vmw_private *dev_priv = res->dev_priv;
310 enum vmw_res_type res_type = vmw_res_type(res);
311 struct vmw_res_cache_entry *rcache;
312 struct vmw_ctx_validation_info *ctx_info;
314 unsigned int priv_size;
316 rcache = &sw_context->res_cache[res_type];
317 if (likely(rcache->valid && rcache->res == res)) {
319 vmw_validation_res_set_dirty(sw_context->ctx,
320 rcache->private, dirty);
321 vmw_user_resource_noref_release();
325 priv_size = vmw_execbuf_res_size(dev_priv, res_type);
326 ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
327 dirty, (void **)&ctx_info,
329 vmw_user_resource_noref_release();
333 if (priv_size && first_usage) {
334 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
337 VMW_DEBUG_USER("Failed first usage context setup.\n");
342 vmw_execbuf_rcache_update(rcache, res, ctx_info);
347 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
348 * validation list if it's not already on it
350 * @sw_context: Pointer to the software context.
351 * @res: Pointer to the resource.
352 * @dirty: Whether to change dirty status.
354 * Returns: Zero on success. Negative error code on failure.
356 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
357 struct vmw_resource *res,
360 struct vmw_res_cache_entry *rcache;
361 enum vmw_res_type res_type = vmw_res_type(res);
365 rcache = &sw_context->res_cache[res_type];
366 if (likely(rcache->valid && rcache->res == res)) {
368 vmw_validation_res_set_dirty(sw_context->ctx,
369 rcache->private, dirty);
373 ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
378 vmw_execbuf_rcache_update(rcache, res, ptr);
384 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
387 * @sw_context: The software context holding the validation list.
388 * @view: Pointer to the view resource.
390 * Returns 0 if success, negative error code otherwise.
392 static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
393 struct vmw_resource *view)
398 * First add the resource the view is pointing to, otherwise it may be
399 * swapped out when the view is validated.
401 ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
402 vmw_view_dirtying(view));
406 return vmw_execbuf_res_noctx_val_add(sw_context, view,
411 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
412 * to to the validation list.
414 * @sw_context: The software context holding the validation list.
415 * @view_type: The view type to look up.
416 * @id: view id of the view.
418 * The view is represented by a view id and the DX context it's created on, or
419 * scheduled for creation on. If there is no DX context set, the function will
420 * return an -EINVAL error pointer.
422 * Returns: Unreferenced pointer to the resource on success, negative error
423 * pointer on failure.
425 static struct vmw_resource *
426 vmw_view_id_val_add(struct vmw_sw_context *sw_context,
427 enum vmw_view_type view_type, u32 id)
429 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
430 struct vmw_resource *view;
434 return ERR_PTR(-EINVAL);
436 view = vmw_view_lookup(sw_context->man, view_type, id);
440 ret = vmw_view_res_val_add(sw_context, view);
448 * vmw_resource_context_res_add - Put resources previously bound to a context on
449 * the validation list
451 * @dev_priv: Pointer to a device private structure
452 * @sw_context: Pointer to a software context used for this command submission
453 * @ctx: Pointer to the context resource
455 * This function puts all resources that were previously bound to @ctx on the
456 * resource validation list. This is part of the context state reemission
458 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
459 struct vmw_sw_context *sw_context,
460 struct vmw_resource *ctx)
462 struct list_head *binding_list;
463 struct vmw_ctx_bindinfo *entry;
465 struct vmw_resource *res;
467 u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
468 SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
470 /* Add all cotables to the validation list. */
471 if (has_sm4_context(dev_priv) &&
472 vmw_res_type(ctx) == vmw_res_dx_context) {
473 for (i = 0; i < cotable_max; ++i) {
474 res = vmw_context_cotable(ctx, i);
478 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
480 if (unlikely(ret != 0))
485 /* Add all resources bound to the context to the validation list */
486 mutex_lock(&dev_priv->binding_mutex);
487 binding_list = vmw_context_binding_list(ctx);
489 list_for_each_entry(entry, binding_list, ctx_list) {
490 if (vmw_res_type(entry->res) == vmw_res_view)
491 ret = vmw_view_res_val_add(sw_context, entry->res);
493 ret = vmw_execbuf_res_noctx_val_add
494 (sw_context, entry->res,
495 vmw_binding_dirtying(entry->bt));
496 if (unlikely(ret != 0))
500 if (has_sm4_context(dev_priv) &&
501 vmw_res_type(ctx) == vmw_res_dx_context) {
502 struct vmw_buffer_object *dx_query_mob;
504 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
506 ret = vmw_validation_add_bo(sw_context->ctx,
507 dx_query_mob, true, false);
510 mutex_unlock(&dev_priv->binding_mutex);
515 * vmw_resource_relocation_add - Add a relocation to the relocation list
517 * @sw_context: Pointer to the software context.
518 * @res: The resource.
519 * @offset: Offset into the command buffer currently being parsed where the id
520 * that needs fixup is located. Granularity is one byte.
521 * @rel_type: Relocation type.
523 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
524 const struct vmw_resource *res,
525 unsigned long offset,
526 enum vmw_resource_relocation_type
529 struct vmw_resource_relocation *rel;
531 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
532 if (unlikely(!rel)) {
533 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
538 rel->offset = offset;
539 rel->rel_type = rel_type;
540 list_add_tail(&rel->head, &sw_context->res_relocations);
546 * vmw_resource_relocations_free - Free all relocations on a list
548 * @list: Pointer to the head of the relocation list
550 static void vmw_resource_relocations_free(struct list_head *list)
552 /* Memory is validation context memory, so no need to free it */
553 INIT_LIST_HEAD(list);
557 * vmw_resource_relocations_apply - Apply all relocations on a list
559 * @cb: Pointer to the start of the command buffer bein patch. This need not be
560 * the same buffer as the one being parsed when the relocation list was built,
561 * but the contents must be the same modulo the resource ids.
562 * @list: Pointer to the head of the relocation list.
564 static void vmw_resource_relocations_apply(uint32_t *cb,
565 struct list_head *list)
567 struct vmw_resource_relocation *rel;
569 /* Validate the struct vmw_resource_relocation member size */
570 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
571 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
573 list_for_each_entry(rel, list, head) {
574 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
575 switch (rel->rel_type) {
576 case vmw_res_rel_normal:
577 *addr = rel->res->id;
579 case vmw_res_rel_nop:
580 *addr = SVGA_3D_CMD_NOP;
583 if (rel->res->id == -1)
584 *addr = SVGA_3D_CMD_NOP;
590 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
591 struct vmw_sw_context *sw_context,
592 SVGA3dCmdHeader *header)
597 static int vmw_cmd_ok(struct vmw_private *dev_priv,
598 struct vmw_sw_context *sw_context,
599 SVGA3dCmdHeader *header)
605 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
608 * @sw_context: Pointer to the software context.
610 * Note that since vmware's command submission currently is protected by the
611 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
612 * only a single thread at once will attempt this.
614 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
618 ret = vmw_validation_res_reserve(sw_context->ctx, true);
622 if (sw_context->dx_query_mob) {
623 struct vmw_buffer_object *expected_dx_query_mob;
625 expected_dx_query_mob =
626 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
627 if (expected_dx_query_mob &&
628 expected_dx_query_mob != sw_context->dx_query_mob) {
637 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
638 * resource validate list unless it's already there.
640 * @dev_priv: Pointer to a device private structure.
641 * @sw_context: Pointer to the software context.
642 * @res_type: Resource type.
643 * @dirty: Whether to change dirty status.
644 * @converter: User-space visisble type specific information.
645 * @id_loc: Pointer to the location in the command buffer currently being parsed
646 * from where the user-space resource id handle is located.
647 * @p_res: Pointer to pointer to resource validalidation node. Populated on
651 vmw_cmd_res_check(struct vmw_private *dev_priv,
652 struct vmw_sw_context *sw_context,
653 enum vmw_res_type res_type,
655 const struct vmw_user_resource_conv *converter,
657 struct vmw_resource **p_res)
659 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
660 struct vmw_resource *res;
666 if (*id_loc == SVGA3D_INVALID_ID) {
667 if (res_type == vmw_res_context) {
668 VMW_DEBUG_USER("Illegal context invalid id.\n");
674 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
677 vmw_validation_res_set_dirty(sw_context->ctx,
678 rcache->private, dirty);
680 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
682 ret = vmw_validation_preload_res(sw_context->ctx, size);
686 res = vmw_user_resource_noref_lookup_handle
687 (dev_priv, sw_context->fp->tfile, *id_loc, converter);
689 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
690 (unsigned int) *id_loc);
694 ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
695 if (unlikely(ret != 0))
698 if (rcache->valid && rcache->res == res) {
699 rcache->valid_handle = true;
700 rcache->handle = *id_loc;
704 ret = vmw_resource_relocation_add(sw_context, res,
705 vmw_ptr_diff(sw_context->buf_start,
715 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
717 * @ctx_res: context the query belongs to
719 * This function assumes binding_mutex is held.
721 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
723 struct vmw_private *dev_priv = ctx_res->dev_priv;
724 struct vmw_buffer_object *dx_query_mob;
725 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
727 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
729 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
732 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
736 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
737 cmd->header.size = sizeof(cmd->body);
738 cmd->body.cid = ctx_res->id;
739 cmd->body.mobid = dx_query_mob->base.resource->start;
740 vmw_cmd_commit(dev_priv, sizeof(*cmd));
742 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
748 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
751 * @sw_context: Pointer to the software context.
753 * Rebind context binding points that have been scrubbed because of eviction.
755 static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
757 struct vmw_ctx_validation_info *val;
760 list_for_each_entry(val, &sw_context->ctx_list, head) {
761 ret = vmw_binding_rebind_all(val->cur);
762 if (unlikely(ret != 0)) {
763 if (ret != -ERESTARTSYS)
764 VMW_DEBUG_USER("Failed to rebind context.\n");
768 ret = vmw_rebind_all_dx_query(val->ctx);
770 VMW_DEBUG_USER("Failed to rebind queries.\n");
779 * vmw_view_bindings_add - Add an array of view bindings to a context binding
782 * @sw_context: The execbuf state used for this command.
783 * @view_type: View type for the bindings.
784 * @binding_type: Binding type for the bindings.
785 * @shader_slot: The shader slot to user for the bindings.
786 * @view_ids: Array of view ids to be bound.
787 * @num_views: Number of view ids in @view_ids.
788 * @first_slot: The binding slot to be used for the first view id in @view_ids.
790 static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
791 enum vmw_view_type view_type,
792 enum vmw_ctx_binding_type binding_type,
794 uint32 view_ids[], u32 num_views,
797 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
803 for (i = 0; i < num_views; ++i) {
804 struct vmw_ctx_bindinfo_view binding;
805 struct vmw_resource *view = NULL;
807 if (view_ids[i] != SVGA3D_INVALID_ID) {
808 view = vmw_view_id_val_add(sw_context, view_type,
811 VMW_DEBUG_USER("View not found.\n");
812 return PTR_ERR(view);
815 binding.bi.ctx = ctx_node->ctx;
816 binding.bi.res = view;
817 binding.bi.bt = binding_type;
818 binding.shader_slot = shader_slot;
819 binding.slot = first_slot + i;
820 vmw_binding_add(ctx_node->staged, &binding.bi,
821 shader_slot, binding.slot);
828 * vmw_cmd_cid_check - Check a command header for valid context information.
830 * @dev_priv: Pointer to a device private structure.
831 * @sw_context: Pointer to the software context.
832 * @header: A command header with an embedded user-space context handle.
834 * Convenience function: Call vmw_cmd_res_check with the user-space context
835 * handle embedded in @header.
837 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
838 struct vmw_sw_context *sw_context,
839 SVGA3dCmdHeader *header)
841 VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
842 container_of(header, typeof(*cmd), header);
844 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
845 VMW_RES_DIRTY_SET, user_context_converter,
850 * vmw_execbuf_info_from_res - Get the private validation metadata for a
851 * recently validated resource
853 * @sw_context: Pointer to the command submission context
856 * The resource pointed to by @res needs to be present in the command submission
857 * context's resource cache and hence the last resource of that type to be
858 * processed by the validation code.
860 * Return: a pointer to the private metadata of the resource, or NULL if it
863 static struct vmw_ctx_validation_info *
864 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
865 struct vmw_resource *res)
867 struct vmw_res_cache_entry *rcache =
868 &sw_context->res_cache[vmw_res_type(res)];
870 if (rcache->valid && rcache->res == res)
871 return rcache->private;
877 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
878 struct vmw_sw_context *sw_context,
879 SVGA3dCmdHeader *header)
881 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
882 struct vmw_resource *ctx;
883 struct vmw_resource *res;
886 cmd = container_of(header, typeof(*cmd), header);
888 if (cmd->body.type >= SVGA3D_RT_MAX) {
889 VMW_DEBUG_USER("Illegal render target type %u.\n",
890 (unsigned int) cmd->body.type);
894 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
895 VMW_RES_DIRTY_SET, user_context_converter,
896 &cmd->body.cid, &ctx);
897 if (unlikely(ret != 0))
900 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
901 VMW_RES_DIRTY_SET, user_surface_converter,
902 &cmd->body.target.sid, &res);
906 if (dev_priv->has_mob) {
907 struct vmw_ctx_bindinfo_view binding;
908 struct vmw_ctx_validation_info *node;
910 node = vmw_execbuf_info_from_res(sw_context, ctx);
914 binding.bi.ctx = ctx;
915 binding.bi.res = res;
916 binding.bi.bt = vmw_ctx_binding_rt;
917 binding.slot = cmd->body.type;
918 vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
924 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
925 struct vmw_sw_context *sw_context,
926 SVGA3dCmdHeader *header)
928 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
931 cmd = container_of(header, typeof(*cmd), header);
933 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
934 VMW_RES_DIRTY_NONE, user_surface_converter,
935 &cmd->body.src.sid, NULL);
939 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
940 VMW_RES_DIRTY_SET, user_surface_converter,
941 &cmd->body.dest.sid, NULL);
944 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
945 struct vmw_sw_context *sw_context,
946 SVGA3dCmdHeader *header)
948 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
951 cmd = container_of(header, typeof(*cmd), header);
952 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
953 VMW_RES_DIRTY_NONE, user_surface_converter,
954 &cmd->body.src, NULL);
958 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
959 VMW_RES_DIRTY_SET, user_surface_converter,
960 &cmd->body.dest, NULL);
963 static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
964 struct vmw_sw_context *sw_context,
965 SVGA3dCmdHeader *header)
967 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
970 cmd = container_of(header, typeof(*cmd), header);
971 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
972 VMW_RES_DIRTY_NONE, user_surface_converter,
973 &cmd->body.srcSid, NULL);
977 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
978 VMW_RES_DIRTY_SET, user_surface_converter,
979 &cmd->body.dstSid, NULL);
982 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
983 struct vmw_sw_context *sw_context,
984 SVGA3dCmdHeader *header)
986 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
989 cmd = container_of(header, typeof(*cmd), header);
990 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
991 VMW_RES_DIRTY_NONE, user_surface_converter,
992 &cmd->body.src.sid, NULL);
993 if (unlikely(ret != 0))
996 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
997 VMW_RES_DIRTY_SET, user_surface_converter,
998 &cmd->body.dest.sid, NULL);
1001 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1002 struct vmw_sw_context *sw_context,
1003 SVGA3dCmdHeader *header)
1005 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
1006 container_of(header, typeof(*cmd), header);
1008 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1009 VMW_RES_DIRTY_NONE, user_surface_converter,
1010 &cmd->body.srcImage.sid, NULL);
1013 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1014 struct vmw_sw_context *sw_context,
1015 SVGA3dCmdHeader *header)
1017 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1018 container_of(header, typeof(*cmd), header);
1020 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1021 VMW_RES_DIRTY_NONE, user_surface_converter,
1022 &cmd->body.sid, NULL);
1026 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1028 * @dev_priv: The device private structure.
1029 * @new_query_bo: The new buffer holding query results.
1030 * @sw_context: The software context used for this command submission.
1032 * This function checks whether @new_query_bo is suitable for holding query
1033 * results, and if another buffer currently is pinned for query results. If so,
1034 * the function prepares the state of @sw_context for switching pinned buffers
1035 * after successful submission of the current command batch.
1037 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1038 struct vmw_buffer_object *new_query_bo,
1039 struct vmw_sw_context *sw_context)
1041 struct vmw_res_cache_entry *ctx_entry =
1042 &sw_context->res_cache[vmw_res_context];
1045 BUG_ON(!ctx_entry->valid);
1046 sw_context->last_query_ctx = ctx_entry->res;
1048 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1050 if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
1051 VMW_DEBUG_USER("Query buffer too large.\n");
1055 if (unlikely(sw_context->cur_query_bo != NULL)) {
1056 sw_context->needs_post_query_barrier = true;
1057 ret = vmw_validation_add_bo(sw_context->ctx,
1058 sw_context->cur_query_bo,
1059 dev_priv->has_mob, false);
1060 if (unlikely(ret != 0))
1063 sw_context->cur_query_bo = new_query_bo;
1065 ret = vmw_validation_add_bo(sw_context->ctx,
1066 dev_priv->dummy_query_bo,
1067 dev_priv->has_mob, false);
1068 if (unlikely(ret != 0))
1076 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1078 * @dev_priv: The device private structure.
1079 * @sw_context: The software context used for this command submission batch.
1081 * This function will check if we're switching query buffers, and will then,
1082 * issue a dummy occlusion query wait used as a query barrier. When the fence
1083 * object following that query wait has signaled, we are sure that all preceding
1084 * queries have finished, and the old query buffer can be unpinned. However,
1085 * since both the new query buffer and the old one are fenced with that fence,
1086 * we can do an asynchronus unpin now, and be sure that the old query buffer
1087 * won't be moved until the fence has signaled.
1089 * As mentioned above, both the new - and old query buffers need to be fenced
1090 * using a sequence emitted *after* calling this function.
1092 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1093 struct vmw_sw_context *sw_context)
1096 * The validate list should still hold references to all
1099 if (sw_context->needs_post_query_barrier) {
1100 struct vmw_res_cache_entry *ctx_entry =
1101 &sw_context->res_cache[vmw_res_context];
1102 struct vmw_resource *ctx;
1105 BUG_ON(!ctx_entry->valid);
1106 ctx = ctx_entry->res;
1108 ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1110 if (unlikely(ret != 0))
1111 VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1114 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1115 if (dev_priv->pinned_bo) {
1116 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1117 vmw_bo_unreference(&dev_priv->pinned_bo);
1120 if (!sw_context->needs_post_query_barrier) {
1121 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1124 * We pin also the dummy_query_bo buffer so that we
1125 * don't need to validate it when emitting dummy queries
1126 * in context destroy paths.
1128 if (!dev_priv->dummy_query_bo_pinned) {
1129 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1131 dev_priv->dummy_query_bo_pinned = true;
1134 BUG_ON(sw_context->last_query_ctx == NULL);
1135 dev_priv->query_cid = sw_context->last_query_ctx->id;
1136 dev_priv->query_cid_valid = true;
1137 dev_priv->pinned_bo =
1138 vmw_bo_reference(sw_context->cur_query_bo);
1144 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1147 * @dev_priv: Pointer to a device private structure.
1148 * @sw_context: The software context used for this command batch validation.
1149 * @id: Pointer to the user-space handle to be translated.
1150 * @vmw_bo_p: Points to a location that, on successful return will carry a
1151 * non-reference-counted pointer to the buffer object identified by the
1152 * user-space handle in @id.
1154 * This function saves information needed to translate a user-space buffer
1155 * handle to a MOB id. The translation does not take place immediately, but
1156 * during a call to vmw_apply_relocations().
1158 * This function builds a relocation list and a list of buffers to validate. The
1159 * former needs to be freed using either vmw_apply_relocations() or
1160 * vmw_free_relocations(). The latter needs to be freed using
1161 * vmw_clear_validations.
1163 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1164 struct vmw_sw_context *sw_context,
1166 struct vmw_buffer_object **vmw_bo_p)
1168 struct vmw_buffer_object *vmw_bo;
1169 uint32_t handle = *id;
1170 struct vmw_relocation *reloc;
1173 vmw_validation_preload_bo(sw_context->ctx);
1174 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1175 if (IS_ERR(vmw_bo)) {
1176 VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1177 return PTR_ERR(vmw_bo);
1180 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1181 vmw_user_bo_noref_release();
1182 if (unlikely(ret != 0))
1185 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1189 reloc->mob_loc = id;
1190 reloc->vbo = vmw_bo;
1193 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1199 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1200 * to a valid SVGAGuestPtr
1202 * @dev_priv: Pointer to a device private structure.
1203 * @sw_context: The software context used for this command batch validation.
1204 * @ptr: Pointer to the user-space handle to be translated.
1205 * @vmw_bo_p: Points to a location that, on successful return will carry a
1206 * non-reference-counted pointer to the DMA buffer identified by the user-space
1209 * This function saves information needed to translate a user-space buffer
1210 * handle to a valid SVGAGuestPtr. The translation does not take place
1211 * immediately, but during a call to vmw_apply_relocations().
1213 * This function builds a relocation list and a list of buffers to validate.
1214 * The former needs to be freed using either vmw_apply_relocations() or
1215 * vmw_free_relocations(). The latter needs to be freed using
1216 * vmw_clear_validations.
1218 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1219 struct vmw_sw_context *sw_context,
1221 struct vmw_buffer_object **vmw_bo_p)
1223 struct vmw_buffer_object *vmw_bo;
1224 uint32_t handle = ptr->gmrId;
1225 struct vmw_relocation *reloc;
1228 vmw_validation_preload_bo(sw_context->ctx);
1229 vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1230 if (IS_ERR(vmw_bo)) {
1231 VMW_DEBUG_USER("Could not find or use GMR region.\n");
1232 return PTR_ERR(vmw_bo);
1235 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1236 vmw_user_bo_noref_release();
1237 if (unlikely(ret != 0))
1240 reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1244 reloc->location = ptr;
1245 reloc->vbo = vmw_bo;
1247 list_add_tail(&reloc->head, &sw_context->bo_relocations);
1253 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1255 * @dev_priv: Pointer to a device private struct.
1256 * @sw_context: The software context used for this command submission.
1257 * @header: Pointer to the command header in the command stream.
1259 * This function adds the new query into the query COTABLE
1261 static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1262 struct vmw_sw_context *sw_context,
1263 SVGA3dCmdHeader *header)
1265 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1266 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1267 struct vmw_resource *cotable_res;
1273 cmd = container_of(header, typeof(*cmd), header);
1275 if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
1276 cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1279 cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1280 ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1286 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1288 * @dev_priv: Pointer to a device private struct.
1289 * @sw_context: The software context used for this command submission.
1290 * @header: Pointer to the command header in the command stream.
1292 * The query bind operation will eventually associate the query ID with its
1293 * backing MOB. In this function, we take the user mode MOB ID and use
1294 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1296 static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1297 struct vmw_sw_context *sw_context,
1298 SVGA3dCmdHeader *header)
1300 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1301 struct vmw_buffer_object *vmw_bo;
1304 cmd = container_of(header, typeof(*cmd), header);
1307 * Look up the buffer pointed to by q.mobid, put it on the relocation
1308 * list so its kernel mode MOB ID can be filled in later
1310 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1316 sw_context->dx_query_mob = vmw_bo;
1317 sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1322 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1324 * @dev_priv: Pointer to a device private struct.
1325 * @sw_context: The software context used for this command submission.
1326 * @header: Pointer to the command header in the command stream.
1328 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1329 struct vmw_sw_context *sw_context,
1330 SVGA3dCmdHeader *header)
1332 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1333 container_of(header, typeof(*cmd), header);
1335 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1336 VMW_RES_DIRTY_SET, user_context_converter,
1337 &cmd->body.cid, NULL);
1341 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1343 * @dev_priv: Pointer to a device private struct.
1344 * @sw_context: The software context used for this command submission.
1345 * @header: Pointer to the command header in the command stream.
1347 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1348 struct vmw_sw_context *sw_context,
1349 SVGA3dCmdHeader *header)
1351 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1352 container_of(header, typeof(*cmd), header);
1354 if (unlikely(dev_priv->has_mob)) {
1355 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1357 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1359 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1360 gb_cmd.header.size = cmd->header.size;
1361 gb_cmd.body.cid = cmd->body.cid;
1362 gb_cmd.body.type = cmd->body.type;
1364 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1365 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1368 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1369 VMW_RES_DIRTY_SET, user_context_converter,
1370 &cmd->body.cid, NULL);
1374 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1376 * @dev_priv: Pointer to a device private struct.
1377 * @sw_context: The software context used for this command submission.
1378 * @header: Pointer to the command header in the command stream.
1380 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1381 struct vmw_sw_context *sw_context,
1382 SVGA3dCmdHeader *header)
1384 struct vmw_buffer_object *vmw_bo;
1385 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1388 cmd = container_of(header, typeof(*cmd), header);
1389 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1390 if (unlikely(ret != 0))
1393 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1395 if (unlikely(ret != 0))
1398 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1404 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1406 * @dev_priv: Pointer to a device private struct.
1407 * @sw_context: The software context used for this command submission.
1408 * @header: Pointer to the command header in the command stream.
1410 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1411 struct vmw_sw_context *sw_context,
1412 SVGA3dCmdHeader *header)
1414 struct vmw_buffer_object *vmw_bo;
1415 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1418 cmd = container_of(header, typeof(*cmd), header);
1419 if (dev_priv->has_mob) {
1420 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1422 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1424 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1425 gb_cmd.header.size = cmd->header.size;
1426 gb_cmd.body.cid = cmd->body.cid;
1427 gb_cmd.body.type = cmd->body.type;
1428 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1429 gb_cmd.body.offset = cmd->body.guestResult.offset;
1431 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1432 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1435 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1436 if (unlikely(ret != 0))
1439 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1440 &cmd->body.guestResult, &vmw_bo);
1441 if (unlikely(ret != 0))
1444 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1450 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1452 * @dev_priv: Pointer to a device private struct.
1453 * @sw_context: The software context used for this command submission.
1454 * @header: Pointer to the command header in the command stream.
1456 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1457 struct vmw_sw_context *sw_context,
1458 SVGA3dCmdHeader *header)
1460 struct vmw_buffer_object *vmw_bo;
1461 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1464 cmd = container_of(header, typeof(*cmd), header);
1465 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1466 if (unlikely(ret != 0))
1469 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1471 if (unlikely(ret != 0))
1478 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1480 * @dev_priv: Pointer to a device private struct.
1481 * @sw_context: The software context used for this command submission.
1482 * @header: Pointer to the command header in the command stream.
1484 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1485 struct vmw_sw_context *sw_context,
1486 SVGA3dCmdHeader *header)
1488 struct vmw_buffer_object *vmw_bo;
1489 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1492 cmd = container_of(header, typeof(*cmd), header);
1493 if (dev_priv->has_mob) {
1494 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1496 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1498 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1499 gb_cmd.header.size = cmd->header.size;
1500 gb_cmd.body.cid = cmd->body.cid;
1501 gb_cmd.body.type = cmd->body.type;
1502 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1503 gb_cmd.body.offset = cmd->body.guestResult.offset;
1505 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1506 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1509 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1510 if (unlikely(ret != 0))
1513 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1514 &cmd->body.guestResult, &vmw_bo);
1515 if (unlikely(ret != 0))
1521 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1522 struct vmw_sw_context *sw_context,
1523 SVGA3dCmdHeader *header)
1525 struct vmw_buffer_object *vmw_bo = NULL;
1526 struct vmw_surface *srf = NULL;
1527 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1529 SVGA3dCmdSurfaceDMASuffix *suffix;
1533 cmd = container_of(header, typeof(*cmd), header);
1534 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1535 header->size - sizeof(*suffix));
1537 /* Make sure device and verifier stays in sync. */
1538 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1539 VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1543 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1544 &cmd->body.guest.ptr, &vmw_bo);
1545 if (unlikely(ret != 0))
1548 /* Make sure DMA doesn't cross BO boundaries. */
1549 bo_size = vmw_bo->base.base.size;
1550 if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1551 VMW_DEBUG_USER("Invalid DMA offset.\n");
1555 bo_size -= cmd->body.guest.ptr.offset;
1556 if (unlikely(suffix->maximumOffset > bo_size))
1557 suffix->maximumOffset = bo_size;
1559 dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1560 VMW_RES_DIRTY_SET : 0;
1561 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1562 dirty, user_surface_converter,
1563 &cmd->body.host.sid, NULL);
1564 if (unlikely(ret != 0)) {
1565 if (unlikely(ret != -ERESTARTSYS))
1566 VMW_DEBUG_USER("could not find surface for DMA.\n");
1570 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1572 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1577 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1578 struct vmw_sw_context *sw_context,
1579 SVGA3dCmdHeader *header)
1581 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1582 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1583 (unsigned long)header + sizeof(*cmd));
1584 SVGA3dPrimitiveRange *range;
1589 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1590 if (unlikely(ret != 0))
1593 cmd = container_of(header, typeof(*cmd), header);
1594 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1596 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1597 VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1601 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1602 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1604 user_surface_converter,
1605 &decl->array.surfaceId, NULL);
1606 if (unlikely(ret != 0))
1610 maxnum = (header->size - sizeof(cmd->body) -
1611 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1612 if (unlikely(cmd->body.numRanges > maxnum)) {
1613 VMW_DEBUG_USER("Illegal number of index ranges.\n");
1617 range = (SVGA3dPrimitiveRange *) decl;
1618 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1619 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1621 user_surface_converter,
1622 &range->indexArray.surfaceId, NULL);
1623 if (unlikely(ret != 0))
1629 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1630 struct vmw_sw_context *sw_context,
1631 SVGA3dCmdHeader *header)
1633 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1634 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1635 ((unsigned long) header + header->size + sizeof(header));
1636 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1637 ((unsigned long) header + sizeof(*cmd));
1638 struct vmw_resource *ctx;
1639 struct vmw_resource *res;
1642 cmd = container_of(header, typeof(*cmd), header);
1644 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1645 VMW_RES_DIRTY_SET, user_context_converter,
1646 &cmd->body.cid, &ctx);
1647 if (unlikely(ret != 0))
1650 for (; cur_state < last_state; ++cur_state) {
1651 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1654 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1655 VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1656 (unsigned int) cur_state->stage);
1660 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1662 user_surface_converter,
1663 &cur_state->value, &res);
1664 if (unlikely(ret != 0))
1667 if (dev_priv->has_mob) {
1668 struct vmw_ctx_bindinfo_tex binding;
1669 struct vmw_ctx_validation_info *node;
1671 node = vmw_execbuf_info_from_res(sw_context, ctx);
1675 binding.bi.ctx = ctx;
1676 binding.bi.res = res;
1677 binding.bi.bt = vmw_ctx_binding_tex;
1678 binding.texture_stage = cur_state->stage;
1679 vmw_binding_add(node->staged, &binding.bi, 0,
1680 binding.texture_stage);
1687 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1688 struct vmw_sw_context *sw_context,
1691 struct vmw_buffer_object *vmw_bo;
1695 SVGAFifoCmdDefineGMRFB body;
1698 return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1703 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1706 * @dev_priv: Pointer to a device private struct.
1707 * @sw_context: The software context being used for this batch.
1708 * @res: Pointer to the resource.
1709 * @buf_id: Pointer to the user-space backup buffer handle in the command
1711 * @backup_offset: Offset of backup into MOB.
1713 * This function prepares for registering a switch of backup buffers in the
1714 * resource metadata just prior to unreserving. It's basically a wrapper around
1715 * vmw_cmd_res_switch_backup with a different interface.
1717 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1718 struct vmw_sw_context *sw_context,
1719 struct vmw_resource *res, uint32_t *buf_id,
1720 unsigned long backup_offset)
1722 struct vmw_buffer_object *vbo;
1726 info = vmw_execbuf_info_from_res(sw_context, res);
1730 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1734 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1740 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1742 * @dev_priv: Pointer to a device private struct.
1743 * @sw_context: The software context being used for this batch.
1744 * @res_type: The resource type.
1745 * @converter: Information about user-space binding for this resource type.
1746 * @res_id: Pointer to the user-space resource handle in the command stream.
1747 * @buf_id: Pointer to the user-space backup buffer handle in the command
1749 * @backup_offset: Offset of backup into MOB.
1751 * This function prepares for registering a switch of backup buffers in the
1752 * resource metadata just prior to unreserving. It's basically a wrapper around
1753 * vmw_cmd_res_switch_backup with a different interface.
1755 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1756 struct vmw_sw_context *sw_context,
1757 enum vmw_res_type res_type,
1758 const struct vmw_user_resource_conv
1759 *converter, uint32_t *res_id, uint32_t *buf_id,
1760 unsigned long backup_offset)
1762 struct vmw_resource *res;
1765 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1766 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1770 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1775 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1777 * @dev_priv: Pointer to a device private struct.
1778 * @sw_context: The software context being used for this batch.
1779 * @header: Pointer to the command header in the command stream.
1781 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1782 struct vmw_sw_context *sw_context,
1783 SVGA3dCmdHeader *header)
1785 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1786 container_of(header, typeof(*cmd), header);
1788 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1789 user_surface_converter, &cmd->body.sid,
1790 &cmd->body.mobid, 0);
1794 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1796 * @dev_priv: Pointer to a device private struct.
1797 * @sw_context: The software context being used for this batch.
1798 * @header: Pointer to the command header in the command stream.
1800 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1801 struct vmw_sw_context *sw_context,
1802 SVGA3dCmdHeader *header)
1804 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1805 container_of(header, typeof(*cmd), header);
1807 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1808 VMW_RES_DIRTY_NONE, user_surface_converter,
1809 &cmd->body.image.sid, NULL);
1813 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1815 * @dev_priv: Pointer to a device private struct.
1816 * @sw_context: The software context being used for this batch.
1817 * @header: Pointer to the command header in the command stream.
1819 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1820 struct vmw_sw_context *sw_context,
1821 SVGA3dCmdHeader *header)
1823 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1824 container_of(header, typeof(*cmd), header);
1826 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1827 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1828 &cmd->body.sid, NULL);
1832 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1834 * @dev_priv: Pointer to a device private struct.
1835 * @sw_context: The software context being used for this batch.
1836 * @header: Pointer to the command header in the command stream.
1838 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1839 struct vmw_sw_context *sw_context,
1840 SVGA3dCmdHeader *header)
1842 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1843 container_of(header, typeof(*cmd), header);
1845 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1846 VMW_RES_DIRTY_NONE, user_surface_converter,
1847 &cmd->body.image.sid, NULL);
1851 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1854 * @dev_priv: Pointer to a device private struct.
1855 * @sw_context: The software context being used for this batch.
1856 * @header: Pointer to the command header in the command stream.
1858 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1859 struct vmw_sw_context *sw_context,
1860 SVGA3dCmdHeader *header)
1862 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1863 container_of(header, typeof(*cmd), header);
1865 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1866 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1867 &cmd->body.sid, NULL);
1871 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1874 * @dev_priv: Pointer to a device private struct.
1875 * @sw_context: The software context being used for this batch.
1876 * @header: Pointer to the command header in the command stream.
1878 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1879 struct vmw_sw_context *sw_context,
1880 SVGA3dCmdHeader *header)
1882 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1883 container_of(header, typeof(*cmd), header);
1885 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1886 VMW_RES_DIRTY_NONE, user_surface_converter,
1887 &cmd->body.image.sid, NULL);
1891 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1894 * @dev_priv: Pointer to a device private struct.
1895 * @sw_context: The software context being used for this batch.
1896 * @header: Pointer to the command header in the command stream.
1898 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1899 struct vmw_sw_context *sw_context,
1900 SVGA3dCmdHeader *header)
1902 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1903 container_of(header, typeof(*cmd), header);
1905 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1906 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1907 &cmd->body.sid, NULL);
1911 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1913 * @dev_priv: Pointer to a device private struct.
1914 * @sw_context: The software context being used for this batch.
1915 * @header: Pointer to the command header in the command stream.
1917 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1918 struct vmw_sw_context *sw_context,
1919 SVGA3dCmdHeader *header)
1921 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1924 struct vmw_resource *ctx;
1926 cmd = container_of(header, typeof(*cmd), header);
1928 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1929 VMW_RES_DIRTY_SET, user_context_converter,
1930 &cmd->body.cid, &ctx);
1931 if (unlikely(ret != 0))
1934 if (unlikely(!dev_priv->has_mob))
1937 size = cmd->header.size - sizeof(cmd->body);
1938 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1939 cmd->body.shid, cmd + 1, cmd->body.type,
1940 size, &sw_context->staged_cmd_res);
1941 if (unlikely(ret != 0))
1944 return vmw_resource_relocation_add(sw_context, NULL,
1945 vmw_ptr_diff(sw_context->buf_start,
1951 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1953 * @dev_priv: Pointer to a device private struct.
1954 * @sw_context: The software context being used for this batch.
1955 * @header: Pointer to the command header in the command stream.
1957 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1958 struct vmw_sw_context *sw_context,
1959 SVGA3dCmdHeader *header)
1961 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1963 struct vmw_resource *ctx;
1965 cmd = container_of(header, typeof(*cmd), header);
1967 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1968 VMW_RES_DIRTY_SET, user_context_converter,
1969 &cmd->body.cid, &ctx);
1970 if (unlikely(ret != 0))
1973 if (unlikely(!dev_priv->has_mob))
1976 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1977 cmd->body.type, &sw_context->staged_cmd_res);
1978 if (unlikely(ret != 0))
1981 return vmw_resource_relocation_add(sw_context, NULL,
1982 vmw_ptr_diff(sw_context->buf_start,
1988 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1990 * @dev_priv: Pointer to a device private struct.
1991 * @sw_context: The software context being used for this batch.
1992 * @header: Pointer to the command header in the command stream.
1994 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1995 struct vmw_sw_context *sw_context,
1996 SVGA3dCmdHeader *header)
1998 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1999 struct vmw_ctx_bindinfo_shader binding;
2000 struct vmw_resource *ctx, *res = NULL;
2001 struct vmw_ctx_validation_info *ctx_info;
2004 cmd = container_of(header, typeof(*cmd), header);
2006 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2007 VMW_DEBUG_USER("Illegal shader type %u.\n",
2008 (unsigned int) cmd->body.type);
2012 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2013 VMW_RES_DIRTY_SET, user_context_converter,
2014 &cmd->body.cid, &ctx);
2015 if (unlikely(ret != 0))
2018 if (!dev_priv->has_mob)
2021 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2023 * This is the compat shader path - Per device guest-backed
2024 * shaders, but user-space thinks it's per context host-
2027 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2028 cmd->body.shid, cmd->body.type);
2030 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2031 VMW_RES_DIRTY_NONE);
2032 if (unlikely(ret != 0))
2035 ret = vmw_resource_relocation_add
2037 vmw_ptr_diff(sw_context->buf_start,
2039 vmw_res_rel_normal);
2040 if (unlikely(ret != 0))
2045 if (IS_ERR_OR_NULL(res)) {
2046 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2048 user_shader_converter, &cmd->body.shid,
2050 if (unlikely(ret != 0))
2054 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2058 binding.bi.ctx = ctx;
2059 binding.bi.res = res;
2060 binding.bi.bt = vmw_ctx_binding_shader;
2061 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2062 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2068 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2070 * @dev_priv: Pointer to a device private struct.
2071 * @sw_context: The software context being used for this batch.
2072 * @header: Pointer to the command header in the command stream.
2074 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2075 struct vmw_sw_context *sw_context,
2076 SVGA3dCmdHeader *header)
2078 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2081 cmd = container_of(header, typeof(*cmd), header);
2083 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2084 VMW_RES_DIRTY_SET, user_context_converter,
2085 &cmd->body.cid, NULL);
2086 if (unlikely(ret != 0))
2089 if (dev_priv->has_mob)
2090 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2096 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2098 * @dev_priv: Pointer to a device private struct.
2099 * @sw_context: The software context being used for this batch.
2100 * @header: Pointer to the command header in the command stream.
2102 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2103 struct vmw_sw_context *sw_context,
2104 SVGA3dCmdHeader *header)
2106 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2107 container_of(header, typeof(*cmd), header);
2109 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2110 user_shader_converter, &cmd->body.shid,
2111 &cmd->body.mobid, cmd->body.offsetInBytes);
2115 * vmw_cmd_dx_set_single_constant_buffer - Validate
2116 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2118 * @dev_priv: Pointer to a device private struct.
2119 * @sw_context: The software context being used for this batch.
2120 * @header: Pointer to the command header in the command stream.
2123 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2124 struct vmw_sw_context *sw_context,
2125 SVGA3dCmdHeader *header)
2127 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2128 SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2129 SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2131 struct vmw_resource *res = NULL;
2132 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2133 struct vmw_ctx_bindinfo_cb binding;
2139 cmd = container_of(header, typeof(*cmd), header);
2140 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2141 VMW_RES_DIRTY_NONE, user_surface_converter,
2142 &cmd->body.sid, &res);
2143 if (unlikely(ret != 0))
2146 binding.bi.ctx = ctx_node->ctx;
2147 binding.bi.res = res;
2148 binding.bi.bt = vmw_ctx_binding_cb;
2149 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2150 binding.offset = cmd->body.offsetInBytes;
2151 binding.size = cmd->body.sizeInBytes;
2152 binding.slot = cmd->body.slot;
2154 if (binding.shader_slot >= max_shader_num ||
2155 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2156 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2157 (unsigned int) cmd->body.type,
2158 (unsigned int) binding.slot);
2162 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2169 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2172 * @dev_priv: Pointer to a device private struct.
2173 * @sw_context: The software context being used for this batch.
2174 * @header: Pointer to the command header in the command stream.
2176 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2177 struct vmw_sw_context *sw_context,
2178 SVGA3dCmdHeader *header)
2180 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2181 container_of(header, typeof(*cmd), header);
2182 SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2183 SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2185 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2186 sizeof(SVGA3dShaderResourceViewId);
2188 if ((u64) cmd->body.startView + (u64) num_sr_view >
2189 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2190 cmd->body.type >= max_allowed) {
2191 VMW_DEBUG_USER("Invalid shader binding.\n");
2195 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2197 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2198 (void *) &cmd[1], num_sr_view,
2199 cmd->body.startView);
2203 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2205 * @dev_priv: Pointer to a device private struct.
2206 * @sw_context: The software context being used for this batch.
2207 * @header: Pointer to the command header in the command stream.
2209 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2210 struct vmw_sw_context *sw_context,
2211 SVGA3dCmdHeader *header)
2213 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2214 SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2215 SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2216 struct vmw_resource *res = NULL;
2217 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2218 struct vmw_ctx_bindinfo_shader binding;
2224 cmd = container_of(header, typeof(*cmd), header);
2226 if (cmd->body.type >= max_allowed ||
2227 cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2228 VMW_DEBUG_USER("Illegal shader type %u.\n",
2229 (unsigned int) cmd->body.type);
2233 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2234 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2236 VMW_DEBUG_USER("Could not find shader for binding.\n");
2237 return PTR_ERR(res);
2240 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2241 VMW_RES_DIRTY_NONE);
2246 binding.bi.ctx = ctx_node->ctx;
2247 binding.bi.res = res;
2248 binding.bi.bt = vmw_ctx_binding_dx_shader;
2249 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2251 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2257 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2260 * @dev_priv: Pointer to a device private struct.
2261 * @sw_context: The software context being used for this batch.
2262 * @header: Pointer to the command header in the command stream.
2264 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2265 struct vmw_sw_context *sw_context,
2266 SVGA3dCmdHeader *header)
2268 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2269 struct vmw_ctx_bindinfo_vb binding;
2270 struct vmw_resource *res;
2272 SVGA3dCmdHeader header;
2273 SVGA3dCmdDXSetVertexBuffers body;
2274 SVGA3dVertexBuffer buf[];
2281 cmd = container_of(header, typeof(*cmd), header);
2282 num = (cmd->header.size - sizeof(cmd->body)) /
2283 sizeof(SVGA3dVertexBuffer);
2284 if ((u64)num + (u64)cmd->body.startBuffer >
2285 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2286 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2290 for (i = 0; i < num; i++) {
2291 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2293 user_surface_converter,
2294 &cmd->buf[i].sid, &res);
2295 if (unlikely(ret != 0))
2298 binding.bi.ctx = ctx_node->ctx;
2299 binding.bi.bt = vmw_ctx_binding_vb;
2300 binding.bi.res = res;
2301 binding.offset = cmd->buf[i].offset;
2302 binding.stride = cmd->buf[i].stride;
2303 binding.slot = i + cmd->body.startBuffer;
2305 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2312 * vmw_cmd_dx_set_index_buffer - Validate
2313 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2315 * @dev_priv: Pointer to a device private struct.
2316 * @sw_context: The software context being used for this batch.
2317 * @header: Pointer to the command header in the command stream.
2319 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2320 struct vmw_sw_context *sw_context,
2321 SVGA3dCmdHeader *header)
2323 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2324 struct vmw_ctx_bindinfo_ib binding;
2325 struct vmw_resource *res;
2326 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2332 cmd = container_of(header, typeof(*cmd), header);
2333 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2334 VMW_RES_DIRTY_NONE, user_surface_converter,
2335 &cmd->body.sid, &res);
2336 if (unlikely(ret != 0))
2339 binding.bi.ctx = ctx_node->ctx;
2340 binding.bi.res = res;
2341 binding.bi.bt = vmw_ctx_binding_ib;
2342 binding.offset = cmd->body.offset;
2343 binding.format = cmd->body.format;
2345 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2351 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2354 * @dev_priv: Pointer to a device private struct.
2355 * @sw_context: The software context being used for this batch.
2356 * @header: Pointer to the command header in the command stream.
2358 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2359 struct vmw_sw_context *sw_context,
2360 SVGA3dCmdHeader *header)
2362 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2363 container_of(header, typeof(*cmd), header);
2364 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2365 sizeof(SVGA3dRenderTargetViewId);
2368 if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2369 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2373 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2374 0, &cmd->body.depthStencilViewId, 1, 0);
2378 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2379 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2384 * vmw_cmd_dx_clear_rendertarget_view - Validate
2385 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2387 * @dev_priv: Pointer to a device private struct.
2388 * @sw_context: The software context being used for this batch.
2389 * @header: Pointer to the command header in the command stream.
2391 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2392 struct vmw_sw_context *sw_context,
2393 SVGA3dCmdHeader *header)
2395 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2396 container_of(header, typeof(*cmd), header);
2397 struct vmw_resource *ret;
2399 ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2400 cmd->body.renderTargetViewId);
2402 return PTR_ERR_OR_ZERO(ret);
2406 * vmw_cmd_dx_clear_depthstencil_view - Validate
2407 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2409 * @dev_priv: Pointer to a device private struct.
2410 * @sw_context: The software context being used for this batch.
2411 * @header: Pointer to the command header in the command stream.
2413 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2414 struct vmw_sw_context *sw_context,
2415 SVGA3dCmdHeader *header)
2417 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2418 container_of(header, typeof(*cmd), header);
2419 struct vmw_resource *ret;
2421 ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2422 cmd->body.depthStencilViewId);
2424 return PTR_ERR_OR_ZERO(ret);
2427 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2428 struct vmw_sw_context *sw_context,
2429 SVGA3dCmdHeader *header)
2431 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2432 struct vmw_resource *srf;
2433 struct vmw_resource *res;
2434 enum vmw_view_type view_type;
2437 * This is based on the fact that all affected define commands have the
2438 * same initial command body layout.
2441 SVGA3dCmdHeader header;
2449 view_type = vmw_view_cmd_to_type(header->id);
2450 if (view_type == vmw_view_max)
2453 cmd = container_of(header, typeof(*cmd), header);
2454 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2455 VMW_DEBUG_USER("Invalid surface id.\n");
2458 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2459 VMW_RES_DIRTY_NONE, user_surface_converter,
2461 if (unlikely(ret != 0))
2464 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2465 ret = vmw_cotable_notify(res, cmd->defined_id);
2466 if (unlikely(ret != 0))
2469 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2470 cmd->defined_id, header,
2471 header->size + sizeof(*header),
2472 &sw_context->staged_cmd_res);
2476 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2478 * @dev_priv: Pointer to a device private struct.
2479 * @sw_context: The software context being used for this batch.
2480 * @header: Pointer to the command header in the command stream.
2482 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2483 struct vmw_sw_context *sw_context,
2484 SVGA3dCmdHeader *header)
2486 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2487 struct vmw_ctx_bindinfo_so_target binding;
2488 struct vmw_resource *res;
2490 SVGA3dCmdHeader header;
2491 SVGA3dCmdDXSetSOTargets body;
2492 SVGA3dSoTarget targets[];
2499 cmd = container_of(header, typeof(*cmd), header);
2500 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2502 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2503 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2507 for (i = 0; i < num; i++) {
2508 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2510 user_surface_converter,
2511 &cmd->targets[i].sid, &res);
2512 if (unlikely(ret != 0))
2515 binding.bi.ctx = ctx_node->ctx;
2516 binding.bi.res = res;
2517 binding.bi.bt = vmw_ctx_binding_so_target;
2518 binding.offset = cmd->targets[i].offset;
2519 binding.size = cmd->targets[i].sizeInBytes;
2522 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2528 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2529 struct vmw_sw_context *sw_context,
2530 SVGA3dCmdHeader *header)
2532 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2533 struct vmw_resource *res;
2535 * This is based on the fact that all affected define commands have
2536 * the same initial command body layout.
2539 SVGA3dCmdHeader header;
2542 enum vmw_so_type so_type;
2548 so_type = vmw_so_cmd_to_type(header->id);
2549 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2551 return PTR_ERR(res);
2552 cmd = container_of(header, typeof(*cmd), header);
2553 ret = vmw_cotable_notify(res, cmd->defined_id);
2559 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2562 * @dev_priv: Pointer to a device private struct.
2563 * @sw_context: The software context being used for this batch.
2564 * @header: Pointer to the command header in the command stream.
2566 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2567 struct vmw_sw_context *sw_context,
2568 SVGA3dCmdHeader *header)
2571 SVGA3dCmdHeader header;
2573 SVGA3dCmdDXReadbackSubResource r_body;
2574 SVGA3dCmdDXInvalidateSubResource i_body;
2575 SVGA3dCmdDXUpdateSubResource u_body;
2576 SVGA3dSurfaceId sid;
2580 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2581 offsetof(typeof(*cmd), sid));
2582 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2583 offsetof(typeof(*cmd), sid));
2584 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2585 offsetof(typeof(*cmd), sid));
2587 cmd = container_of(header, typeof(*cmd), header);
2588 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2589 VMW_RES_DIRTY_NONE, user_surface_converter,
2593 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2594 struct vmw_sw_context *sw_context,
2595 SVGA3dCmdHeader *header)
2597 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2606 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2607 * resource for removal.
2609 * @dev_priv: Pointer to a device private struct.
2610 * @sw_context: The software context being used for this batch.
2611 * @header: Pointer to the command header in the command stream.
2613 * Check that the view exists, and if it was not created using this command
2614 * batch, conditionally make this command a NOP.
2616 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2617 struct vmw_sw_context *sw_context,
2618 SVGA3dCmdHeader *header)
2620 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2622 SVGA3dCmdHeader header;
2623 union vmw_view_destroy body;
2624 } *cmd = container_of(header, typeof(*cmd), header);
2625 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2626 struct vmw_resource *view;
2632 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2633 &sw_context->staged_cmd_res, &view);
2638 * If the view wasn't created during this command batch, it might
2639 * have been removed due to a context swapout, so add a
2640 * relocation to conditionally make this command a NOP to avoid
2643 return vmw_resource_relocation_add(sw_context, view,
2644 vmw_ptr_diff(sw_context->buf_start,
2646 vmw_res_rel_cond_nop);
2650 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2652 * @dev_priv: Pointer to a device private struct.
2653 * @sw_context: The software context being used for this batch.
2654 * @header: Pointer to the command header in the command stream.
2656 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2657 struct vmw_sw_context *sw_context,
2658 SVGA3dCmdHeader *header)
2660 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2661 struct vmw_resource *res;
2662 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2663 container_of(header, typeof(*cmd), header);
2669 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2670 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2674 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2675 cmd->body.shaderId, cmd->body.type,
2676 &sw_context->staged_cmd_res);
2680 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2682 * @dev_priv: Pointer to a device private struct.
2683 * @sw_context: The software context being used for this batch.
2684 * @header: Pointer to the command header in the command stream.
2686 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2687 struct vmw_sw_context *sw_context,
2688 SVGA3dCmdHeader *header)
2690 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2691 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2692 container_of(header, typeof(*cmd), header);
2698 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2699 &sw_context->staged_cmd_res);
2705 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2707 * @dev_priv: Pointer to a device private struct.
2708 * @sw_context: The software context being used for this batch.
2709 * @header: Pointer to the command header in the command stream.
2711 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2712 struct vmw_sw_context *sw_context,
2713 SVGA3dCmdHeader *header)
2715 struct vmw_resource *ctx;
2716 struct vmw_resource *res;
2717 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2718 container_of(header, typeof(*cmd), header);
2721 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2722 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2724 user_context_converter, &cmd->body.cid,
2729 struct vmw_ctx_validation_info *ctx_node =
2730 VMW_GET_CTX_NODE(sw_context);
2735 ctx = ctx_node->ctx;
2738 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2740 VMW_DEBUG_USER("Could not find shader to bind.\n");
2741 return PTR_ERR(res);
2744 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2745 VMW_RES_DIRTY_NONE);
2747 VMW_DEBUG_USER("Error creating resource validation node.\n");
2751 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2753 cmd->body.offsetInBytes);
2757 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2759 * @dev_priv: Pointer to a device private struct.
2760 * @sw_context: The software context being used for this batch.
2761 * @header: Pointer to the command header in the command stream.
2763 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2764 struct vmw_sw_context *sw_context,
2765 SVGA3dCmdHeader *header)
2767 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2768 container_of(header, typeof(*cmd), header);
2769 struct vmw_resource *view;
2770 struct vmw_res_cache_entry *rcache;
2772 view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2773 cmd->body.shaderResourceViewId);
2775 return PTR_ERR(view);
2778 * Normally the shader-resource view is not gpu-dirtying, but for
2779 * this particular command it is...
2780 * So mark the last looked-up surface, which is the surface
2781 * the view points to, gpu-dirty.
2783 rcache = &sw_context->res_cache[vmw_res_surface];
2784 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2790 * vmw_cmd_dx_transfer_from_buffer - Validate
2791 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2793 * @dev_priv: Pointer to a device private struct.
2794 * @sw_context: The software context being used for this batch.
2795 * @header: Pointer to the command header in the command stream.
2797 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2798 struct vmw_sw_context *sw_context,
2799 SVGA3dCmdHeader *header)
2801 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2802 container_of(header, typeof(*cmd), header);
2805 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2806 VMW_RES_DIRTY_NONE, user_surface_converter,
2807 &cmd->body.srcSid, NULL);
2811 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2812 VMW_RES_DIRTY_SET, user_surface_converter,
2813 &cmd->body.destSid, NULL);
2817 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2819 * @dev_priv: Pointer to a device private struct.
2820 * @sw_context: The software context being used for this batch.
2821 * @header: Pointer to the command header in the command stream.
2823 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2824 struct vmw_sw_context *sw_context,
2825 SVGA3dCmdHeader *header)
2827 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2828 container_of(header, typeof(*cmd), header);
2830 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2833 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2834 VMW_RES_DIRTY_SET, user_surface_converter,
2835 &cmd->body.surface.sid, NULL);
2838 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2839 struct vmw_sw_context *sw_context,
2840 SVGA3dCmdHeader *header)
2842 if (!has_sm5_context(dev_priv))
2848 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2849 struct vmw_sw_context *sw_context,
2850 SVGA3dCmdHeader *header)
2852 if (!has_sm5_context(dev_priv))
2855 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2858 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2859 struct vmw_sw_context *sw_context,
2860 SVGA3dCmdHeader *header)
2862 if (!has_sm5_context(dev_priv))
2865 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2868 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2869 struct vmw_sw_context *sw_context,
2870 SVGA3dCmdHeader *header)
2873 SVGA3dCmdHeader header;
2874 SVGA3dCmdDXClearUAViewUint body;
2875 } *cmd = container_of(header, typeof(*cmd), header);
2876 struct vmw_resource *ret;
2878 if (!has_sm5_context(dev_priv))
2881 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2882 cmd->body.uaViewId);
2884 return PTR_ERR_OR_ZERO(ret);
2887 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2888 struct vmw_sw_context *sw_context,
2889 SVGA3dCmdHeader *header)
2892 SVGA3dCmdHeader header;
2893 SVGA3dCmdDXClearUAViewFloat body;
2894 } *cmd = container_of(header, typeof(*cmd), header);
2895 struct vmw_resource *ret;
2897 if (!has_sm5_context(dev_priv))
2900 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2901 cmd->body.uaViewId);
2903 return PTR_ERR_OR_ZERO(ret);
2906 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2907 struct vmw_sw_context *sw_context,
2908 SVGA3dCmdHeader *header)
2911 SVGA3dCmdHeader header;
2912 SVGA3dCmdDXSetUAViews body;
2913 } *cmd = container_of(header, typeof(*cmd), header);
2914 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2915 sizeof(SVGA3dUAViewId);
2918 if (!has_sm5_context(dev_priv))
2921 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2922 VMW_DEBUG_USER("Invalid UAV binding.\n");
2926 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2927 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2932 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2933 cmd->body.uavSpliceIndex);
2938 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2939 struct vmw_sw_context *sw_context,
2940 SVGA3dCmdHeader *header)
2943 SVGA3dCmdHeader header;
2944 SVGA3dCmdDXSetCSUAViews body;
2945 } *cmd = container_of(header, typeof(*cmd), header);
2946 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2947 sizeof(SVGA3dUAViewId);
2950 if (!has_sm5_context(dev_priv))
2953 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2954 VMW_DEBUG_USER("Invalid UAV binding.\n");
2958 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2959 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2964 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2965 cmd->body.startIndex);
2970 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2971 struct vmw_sw_context *sw_context,
2972 SVGA3dCmdHeader *header)
2974 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2975 struct vmw_resource *res;
2977 SVGA3dCmdHeader header;
2978 SVGA3dCmdDXDefineStreamOutputWithMob body;
2979 } *cmd = container_of(header, typeof(*cmd), header);
2982 if (!has_sm5_context(dev_priv))
2986 DRM_ERROR("DX Context not set.\n");
2990 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2991 ret = vmw_cotable_notify(res, cmd->body.soid);
2995 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2997 &sw_context->staged_cmd_res);
3000 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3001 struct vmw_sw_context *sw_context,
3002 SVGA3dCmdHeader *header)
3004 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3005 struct vmw_resource *res;
3007 SVGA3dCmdHeader header;
3008 SVGA3dCmdDXDestroyStreamOutput body;
3009 } *cmd = container_of(header, typeof(*cmd), header);
3012 DRM_ERROR("DX Context not set.\n");
3017 * When device does not support SM5 then streamoutput with mob command is
3018 * not available to user-space. Simply return in this case.
3020 if (!has_sm5_context(dev_priv))
3024 * With SM5 capable device if lookup fails then user-space probably used
3025 * old streamoutput define command. Return without an error.
3027 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3032 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3033 &sw_context->staged_cmd_res);
3036 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3037 struct vmw_sw_context *sw_context,
3038 SVGA3dCmdHeader *header)
3040 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3041 struct vmw_resource *res;
3043 SVGA3dCmdHeader header;
3044 SVGA3dCmdDXBindStreamOutput body;
3045 } *cmd = container_of(header, typeof(*cmd), header);
3048 if (!has_sm5_context(dev_priv))
3052 DRM_ERROR("DX Context not set.\n");
3056 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3059 DRM_ERROR("Could not find streamoutput to bind.\n");
3060 return PTR_ERR(res);
3063 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3065 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3066 VMW_RES_DIRTY_NONE);
3068 DRM_ERROR("Error creating resource validation node.\n");
3072 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3074 cmd->body.offsetInBytes);
3077 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3078 struct vmw_sw_context *sw_context,
3079 SVGA3dCmdHeader *header)
3081 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3082 struct vmw_resource *res;
3083 struct vmw_ctx_bindinfo_so binding;
3085 SVGA3dCmdHeader header;
3086 SVGA3dCmdDXSetStreamOutput body;
3087 } *cmd = container_of(header, typeof(*cmd), header);
3091 DRM_ERROR("DX Context not set.\n");
3095 if (cmd->body.soid == SVGA3D_INVALID_ID)
3099 * When device does not support SM5 then streamoutput with mob command is
3100 * not available to user-space. Simply return in this case.
3102 if (!has_sm5_context(dev_priv))
3106 * With SM5 capable device if lookup fails then user-space probably used
3107 * old streamoutput define command. Return without an error.
3109 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3115 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3116 VMW_RES_DIRTY_NONE);
3118 DRM_ERROR("Error creating resource validation node.\n");
3122 binding.bi.ctx = ctx_node->ctx;
3123 binding.bi.res = res;
3124 binding.bi.bt = vmw_ctx_binding_so;
3125 binding.slot = 0; /* Only one SO set to context at a time. */
3127 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3133 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3134 struct vmw_sw_context *sw_context,
3135 SVGA3dCmdHeader *header)
3137 struct vmw_draw_indexed_instanced_indirect_cmd {
3138 SVGA3dCmdHeader header;
3139 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3140 } *cmd = container_of(header, typeof(*cmd), header);
3142 if (!has_sm5_context(dev_priv))
3145 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3146 VMW_RES_DIRTY_NONE, user_surface_converter,
3147 &cmd->body.argsBufferSid, NULL);
3150 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3151 struct vmw_sw_context *sw_context,
3152 SVGA3dCmdHeader *header)
3154 struct vmw_draw_instanced_indirect_cmd {
3155 SVGA3dCmdHeader header;
3156 SVGA3dCmdDXDrawInstancedIndirect body;
3157 } *cmd = container_of(header, typeof(*cmd), header);
3159 if (!has_sm5_context(dev_priv))
3162 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3163 VMW_RES_DIRTY_NONE, user_surface_converter,
3164 &cmd->body.argsBufferSid, NULL);
3167 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3168 struct vmw_sw_context *sw_context,
3169 SVGA3dCmdHeader *header)
3171 struct vmw_dispatch_indirect_cmd {
3172 SVGA3dCmdHeader header;
3173 SVGA3dCmdDXDispatchIndirect body;
3174 } *cmd = container_of(header, typeof(*cmd), header);
3176 if (!has_sm5_context(dev_priv))
3179 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3180 VMW_RES_DIRTY_NONE, user_surface_converter,
3181 &cmd->body.argsBufferSid, NULL);
3184 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3185 struct vmw_sw_context *sw_context,
3186 void *buf, uint32_t *size)
3188 uint32_t size_remaining = *size;
3191 cmd_id = ((uint32_t *)buf)[0];
3193 case SVGA_CMD_UPDATE:
3194 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3196 case SVGA_CMD_DEFINE_GMRFB:
3197 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3199 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3200 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3202 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3203 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3206 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3210 if (*size > size_remaining) {
3211 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3216 if (unlikely(!sw_context->kernel)) {
3217 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3221 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3222 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3227 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3228 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3229 false, false, false),
3230 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3231 false, false, false),
3232 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3233 true, false, false),
3234 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3235 true, false, false),
3236 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3237 true, false, false),
3238 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3239 false, false, false),
3240 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3241 false, false, false),
3242 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3243 true, false, false),
3244 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3245 true, false, false),
3246 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3247 true, false, false),
3248 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3249 &vmw_cmd_set_render_target_check, true, false, false),
3250 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3251 true, false, false),
3252 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3253 true, false, false),
3254 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3255 true, false, false),
3256 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3257 true, false, false),
3258 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3259 true, false, false),
3260 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3261 true, false, false),
3262 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3263 true, false, false),
3264 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3265 false, false, false),
3266 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3267 true, false, false),
3268 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3269 true, false, false),
3270 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3271 true, false, false),
3272 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3273 true, false, false),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3275 true, false, false),
3276 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3277 true, false, false),
3278 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3279 true, false, false),
3280 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3281 true, false, false),
3282 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3283 true, false, false),
3284 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3285 true, false, false),
3286 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3287 &vmw_cmd_blt_surf_screen_check, false, false, false),
3288 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3289 false, false, false),
3290 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3291 false, false, false),
3292 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3293 false, false, false),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3295 false, false, false),
3296 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3297 false, false, false),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3299 false, false, false),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3301 false, false, false),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3303 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3305 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3307 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3308 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3309 false, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3311 false, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3313 false, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3315 false, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3317 false, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3319 false, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3321 false, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3323 false, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3326 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3327 false, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3330 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3331 &vmw_cmd_update_gb_surface, true, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3333 &vmw_cmd_readback_gb_image, true, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3335 &vmw_cmd_readback_gb_surface, true, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3337 &vmw_cmd_invalidate_gb_image, true, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3339 &vmw_cmd_invalidate_gb_surface, true, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3341 false, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3343 false, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3345 false, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3347 false, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3349 false, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3351 false, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3354 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3355 false, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3357 false, false, false),
3358 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3360 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3362 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3364 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3366 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3368 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3369 false, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3371 false, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3373 false, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3375 false, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3377 false, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3379 false, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3381 false, false, true),
3382 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3383 false, false, true),
3384 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3385 false, false, true),
3386 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3387 false, false, true),
3388 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3390 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3391 false, false, true),
3392 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3393 false, false, true),
3394 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3395 false, false, true),
3396 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3397 false, false, true),
3400 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3401 false, false, true),
3402 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3403 false, false, true),
3404 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3405 false, false, true),
3406 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3407 false, false, true),
3408 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3409 false, false, true),
3410 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3411 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3413 &vmw_cmd_dx_set_shader_res, true, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3416 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3418 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3420 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3425 &vmw_cmd_dx_cid_check, true, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3429 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3431 &vmw_cmd_dx_set_index_buffer, true, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3433 &vmw_cmd_dx_set_rendertargets, true, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3437 &vmw_cmd_dx_cid_check, true, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3439 &vmw_cmd_dx_cid_check, true, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3447 &vmw_cmd_dx_cid_check, true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3461 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3463 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3469 &vmw_cmd_dx_check_subresource, true, false, true),
3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3471 &vmw_cmd_dx_check_subresource, true, false, true),
3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3473 &vmw_cmd_dx_check_subresource, true, false, true),
3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3475 &vmw_cmd_dx_view_define, true, false, true),
3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3477 &vmw_cmd_dx_view_remove, true, false, true),
3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3479 &vmw_cmd_dx_view_define, true, false, true),
3480 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3481 &vmw_cmd_dx_view_remove, true, false, true),
3482 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3483 &vmw_cmd_dx_view_define, true, false, true),
3484 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3485 &vmw_cmd_dx_view_remove, true, false, true),
3486 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3487 &vmw_cmd_dx_so_define, true, false, true),
3488 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3489 &vmw_cmd_dx_cid_check, true, false, true),
3490 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3491 &vmw_cmd_dx_so_define, true, false, true),
3492 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3493 &vmw_cmd_dx_cid_check, true, false, true),
3494 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3495 &vmw_cmd_dx_so_define, true, false, true),
3496 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3497 &vmw_cmd_dx_cid_check, true, false, true),
3498 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3499 &vmw_cmd_dx_so_define, true, false, true),
3500 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3501 &vmw_cmd_dx_cid_check, true, false, true),
3502 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3503 &vmw_cmd_dx_so_define, true, false, true),
3504 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3505 &vmw_cmd_dx_cid_check, true, false, true),
3506 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3507 &vmw_cmd_dx_define_shader, true, false, true),
3508 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3509 &vmw_cmd_dx_destroy_shader, true, false, true),
3510 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3511 &vmw_cmd_dx_bind_shader, true, false, true),
3512 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3513 &vmw_cmd_dx_so_define, true, false, true),
3514 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3515 &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3516 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3517 &vmw_cmd_dx_set_streamoutput, true, false, true),
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3519 &vmw_cmd_dx_set_so_targets, true, false, true),
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3521 &vmw_cmd_dx_cid_check, true, false, true),
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3523 &vmw_cmd_dx_cid_check, true, false, true),
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3525 &vmw_cmd_buffer_copy_check, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3527 &vmw_cmd_pred_copy_check, true, false, true),
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3529 &vmw_cmd_dx_transfer_from_buffer,
3531 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3537 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3539 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3541 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3543 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3544 &vmw_cmd_clear_uav_float, true, false, true),
3545 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3547 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3549 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3550 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3551 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3552 &vmw_cmd_instanced_indirect, true, false, true),
3553 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3554 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3555 &vmw_cmd_dispatch_indirect, true, false, true),
3556 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3558 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3559 &vmw_cmd_sm5_view_define, true, false, true),
3560 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3561 &vmw_cmd_dx_define_streamoutput, true, false, true),
3562 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3563 &vmw_cmd_dx_bind_streamoutput, true, false, true),
3566 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3568 u32 cmd_id = ((u32 *) buf)[0];
3570 if (cmd_id >= SVGA_CMD_MAX) {
3571 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3572 const struct vmw_cmd_entry *entry;
3574 *size = header->size + sizeof(SVGA3dCmdHeader);
3575 cmd_id = header->id;
3576 if (cmd_id >= SVGA_3D_CMD_MAX)
3579 cmd_id -= SVGA_3D_CMD_BASE;
3580 entry = &vmw_cmd_entries[cmd_id];
3581 *cmd = entry->cmd_name;
3586 case SVGA_CMD_UPDATE:
3587 *cmd = "SVGA_CMD_UPDATE";
3588 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3590 case SVGA_CMD_DEFINE_GMRFB:
3591 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3592 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3594 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3595 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3596 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3598 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3599 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3600 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3611 static int vmw_cmd_check(struct vmw_private *dev_priv,
3612 struct vmw_sw_context *sw_context, void *buf,
3616 uint32_t size_remaining = *size;
3617 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3619 const struct vmw_cmd_entry *entry;
3620 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3622 cmd_id = ((uint32_t *)buf)[0];
3623 /* Handle any none 3D commands */
3624 if (unlikely(cmd_id < SVGA_CMD_MAX))
3625 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3628 cmd_id = header->id;
3629 *size = header->size + sizeof(SVGA3dCmdHeader);
3631 cmd_id -= SVGA_3D_CMD_BASE;
3632 if (unlikely(*size > size_remaining))
3635 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3638 entry = &vmw_cmd_entries[cmd_id];
3639 if (unlikely(!entry->func))
3642 if (unlikely(!entry->user_allow && !sw_context->kernel))
3643 goto out_privileged;
3645 if (unlikely(entry->gb_disable && gb))
3648 if (unlikely(entry->gb_enable && !gb))
3651 ret = entry->func(dev_priv, sw_context, header);
3652 if (unlikely(ret != 0)) {
3653 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3654 cmd_id + SVGA_3D_CMD_BASE, ret);
3660 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3661 cmd_id + SVGA_3D_CMD_BASE);
3664 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3665 cmd_id + SVGA_3D_CMD_BASE);
3668 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3669 cmd_id + SVGA_3D_CMD_BASE);
3672 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3673 cmd_id + SVGA_3D_CMD_BASE);
3677 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3678 struct vmw_sw_context *sw_context, void *buf,
3681 int32_t cur_size = size;
3684 sw_context->buf_start = buf;
3686 while (cur_size > 0) {
3688 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3689 if (unlikely(ret != 0))
3691 buf = (void *)((unsigned long) buf + size);
3695 if (unlikely(cur_size != 0)) {
3696 VMW_DEBUG_USER("Command verifier out of sync.\n");
3703 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3705 /* Memory is validation context memory, so no need to free it */
3706 INIT_LIST_HEAD(&sw_context->bo_relocations);
3709 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3711 struct vmw_relocation *reloc;
3712 struct ttm_buffer_object *bo;
3714 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3715 bo = &reloc->vbo->base;
3716 switch (bo->resource->mem_type) {
3718 reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3719 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3722 reloc->location->gmrId = bo->resource->start;
3725 *reloc->mob_loc = bo->resource->start;
3731 vmw_free_relocations(sw_context);
3734 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3737 if (likely(sw_context->cmd_bounce_size >= size))
3740 if (sw_context->cmd_bounce_size == 0)
3741 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3743 while (sw_context->cmd_bounce_size < size) {
3744 sw_context->cmd_bounce_size =
3745 PAGE_ALIGN(sw_context->cmd_bounce_size +
3746 (sw_context->cmd_bounce_size >> 1));
3749 vfree(sw_context->cmd_bounce);
3750 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3752 if (sw_context->cmd_bounce == NULL) {
3753 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3754 sw_context->cmd_bounce_size = 0;
3762 * vmw_execbuf_fence_commands - create and submit a command stream fence
3764 * Creates a fence object and submits a command stream marker.
3765 * If this fails for some reason, We sync the fifo and return NULL.
3766 * It is then safe to fence buffers with a NULL pointer.
3768 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3769 * userspace handle if @p_handle is not NULL, otherwise not.
3772 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3773 struct vmw_private *dev_priv,
3774 struct vmw_fence_obj **p_fence,
3779 bool synced = false;
3781 /* p_handle implies file_priv. */
3782 BUG_ON(p_handle != NULL && file_priv == NULL);
3784 ret = vmw_cmd_send_fence(dev_priv, &sequence);
3785 if (unlikely(ret != 0)) {
3786 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3790 if (p_handle != NULL)
3791 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3792 sequence, p_fence, p_handle);
3794 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3796 if (unlikely(ret != 0 && !synced)) {
3797 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3798 false, VMW_FENCE_WAIT_TIMEOUT);
3806 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3808 * @dev_priv: Pointer to a vmw_private struct.
3809 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3810 * @ret: Return value from fence object creation.
3811 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3812 * the information should be copied.
3813 * @fence: Pointer to the fenc object.
3814 * @fence_handle: User-space fence handle.
3815 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3816 * @sync_file: Only used to clean up in case of an error in this function.
3818 * This function copies fence information to user-space. If copying fails, the
3819 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3820 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3821 * will hopefully be detected.
3823 * Also if copying fails, user-space will be unable to signal the fence object
3824 * so we wait for it immediately, and then unreference the user-space reference.
3827 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3828 struct vmw_fpriv *vmw_fp, int ret,
3829 struct drm_vmw_fence_rep __user *user_fence_rep,
3830 struct vmw_fence_obj *fence, uint32_t fence_handle,
3831 int32_t out_fence_fd, struct sync_file *sync_file)
3833 struct drm_vmw_fence_rep fence_rep;
3835 if (user_fence_rep == NULL)
3838 memset(&fence_rep, 0, sizeof(fence_rep));
3840 fence_rep.error = ret;
3841 fence_rep.fd = out_fence_fd;
3843 BUG_ON(fence == NULL);
3845 fence_rep.handle = fence_handle;
3846 fence_rep.seqno = fence->base.seqno;
3847 vmw_update_seqno(dev_priv);
3848 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3852 * copy_to_user errors will be detected by user space not seeing
3853 * fence_rep::error filled in. Typically user-space would have pre-set
3854 * that member to -EFAULT.
3856 ret = copy_to_user(user_fence_rep, &fence_rep,
3860 * User-space lost the fence object. We need to sync and unreference the
3863 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3865 fput(sync_file->file);
3867 if (fence_rep.fd != -1) {
3868 put_unused_fd(fence_rep.fd);
3872 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3874 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3875 (void) vmw_fence_obj_wait(fence, false, false,
3876 VMW_FENCE_WAIT_TIMEOUT);
3881 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3883 * @dev_priv: Pointer to a device private structure.
3884 * @kernel_commands: Pointer to the unpatched command batch.
3885 * @command_size: Size of the unpatched command batch.
3886 * @sw_context: Structure holding the relocation lists.
3888 * Side effects: If this function returns 0, then the command batch pointed to
3889 * by @kernel_commands will have been modified.
3891 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3892 void *kernel_commands, u32 command_size,
3893 struct vmw_sw_context *sw_context)
3897 if (sw_context->dx_ctx_node)
3898 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3899 sw_context->dx_ctx_node->ctx->id);
3901 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3906 vmw_apply_relocations(sw_context);
3907 memcpy(cmd, kernel_commands, command_size);
3908 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3909 vmw_resource_relocations_free(&sw_context->res_relocations);
3910 vmw_cmd_commit(dev_priv, command_size);
3916 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3917 * command buffer manager.
3919 * @dev_priv: Pointer to a device private structure.
3920 * @header: Opaque handle to the command buffer allocation.
3921 * @command_size: Size of the unpatched command batch.
3922 * @sw_context: Structure holding the relocation lists.
3924 * Side effects: If this function returns 0, then the command buffer represented
3925 * by @header will have been modified.
3927 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3928 struct vmw_cmdbuf_header *header,
3930 struct vmw_sw_context *sw_context)
3932 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3934 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3937 vmw_apply_relocations(sw_context);
3938 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3939 vmw_resource_relocations_free(&sw_context->res_relocations);
3940 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3946 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3947 * submission using a command buffer.
3949 * @dev_priv: Pointer to a device private structure.
3950 * @user_commands: User-space pointer to the commands to be submitted.
3951 * @command_size: Size of the unpatched command batch.
3952 * @header: Out parameter returning the opaque pointer to the command buffer.
3954 * This function checks whether we can use the command buffer manager for
3955 * submission and if so, creates a command buffer of suitable size and copies
3956 * the user data into that buffer.
3958 * On successful return, the function returns a pointer to the data in the
3959 * command buffer and *@header is set to non-NULL.
3961 * @kernel_commands: If command buffers could not be used, the function will
3962 * return the value of @kernel_commands on function call. That value may be
3963 * NULL. In that case, the value of *@header will be set to NULL.
3965 * If an error is encountered, the function will return a pointer error value.
3966 * If the function is interrupted by a signal while sleeping, it will return
3967 * -ERESTARTSYS casted to a pointer error value.
3969 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3970 void __user *user_commands,
3971 void *kernel_commands, u32 command_size,
3972 struct vmw_cmdbuf_header **header)
3978 if (command_size > SVGA_CB_MAX_SIZE) {
3979 VMW_DEBUG_USER("Command buffer is too large.\n");
3980 return ERR_PTR(-EINVAL);
3983 if (!dev_priv->cman || kernel_commands)
3984 return kernel_commands;
3986 /* If possible, add a little space for fencing. */
3987 cmdbuf_size = command_size + 512;
3988 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3989 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3991 if (IS_ERR(kernel_commands))
3992 return kernel_commands;
3994 ret = copy_from_user(kernel_commands, user_commands, command_size);
3996 VMW_DEBUG_USER("Failed copying commands.\n");
3997 vmw_cmdbuf_header_free(*header);
3999 return ERR_PTR(-EFAULT);
4002 return kernel_commands;
4005 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4006 struct vmw_sw_context *sw_context,
4009 struct vmw_resource *res;
4013 if (handle == SVGA3D_INVALID_ID)
4016 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4017 ret = vmw_validation_preload_res(sw_context->ctx, size);
4021 res = vmw_user_resource_noref_lookup_handle
4022 (dev_priv, sw_context->fp->tfile, handle,
4023 user_context_converter);
4025 VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4026 (unsigned int) handle);
4027 return PTR_ERR(res);
4030 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4031 if (unlikely(ret != 0))
4034 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4035 sw_context->man = vmw_context_res_man(res);
4040 int vmw_execbuf_process(struct drm_file *file_priv,
4041 struct vmw_private *dev_priv,
4042 void __user *user_commands, void *kernel_commands,
4043 uint32_t command_size, uint64_t throttle_us,
4044 uint32_t dx_context_handle,
4045 struct drm_vmw_fence_rep __user *user_fence_rep,
4046 struct vmw_fence_obj **out_fence, uint32_t flags)
4048 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4049 struct vmw_fence_obj *fence = NULL;
4050 struct vmw_cmdbuf_header *header;
4051 uint32_t handle = 0;
4053 int32_t out_fence_fd = -1;
4054 struct sync_file *sync_file = NULL;
4055 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4057 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4059 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4060 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4061 if (out_fence_fd < 0) {
4062 VMW_DEBUG_USER("Failed to get a fence fd.\n");
4063 return out_fence_fd;
4068 VMW_DEBUG_USER("Throttling is no longer supported.\n");
4071 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4072 kernel_commands, command_size,
4074 if (IS_ERR(kernel_commands)) {
4075 ret = PTR_ERR(kernel_commands);
4076 goto out_free_fence_fd;
4079 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4082 goto out_free_header;
4085 sw_context->kernel = false;
4086 if (kernel_commands == NULL) {
4087 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4088 if (unlikely(ret != 0))
4091 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4093 if (unlikely(ret != 0)) {
4095 VMW_DEBUG_USER("Failed copying commands.\n");
4099 kernel_commands = sw_context->cmd_bounce;
4100 } else if (!header) {
4101 sw_context->kernel = true;
4104 sw_context->fp = vmw_fpriv(file_priv);
4105 INIT_LIST_HEAD(&sw_context->ctx_list);
4106 sw_context->cur_query_bo = dev_priv->pinned_bo;
4107 sw_context->last_query_ctx = NULL;
4108 sw_context->needs_post_query_barrier = false;
4109 sw_context->dx_ctx_node = NULL;
4110 sw_context->dx_query_mob = NULL;
4111 sw_context->dx_query_ctx = NULL;
4112 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4113 INIT_LIST_HEAD(&sw_context->res_relocations);
4114 INIT_LIST_HEAD(&sw_context->bo_relocations);
4116 if (sw_context->staged_bindings)
4117 vmw_binding_state_reset(sw_context->staged_bindings);
4119 if (!sw_context->res_ht_initialized) {
4120 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4121 if (unlikely(ret != 0))
4124 sw_context->res_ht_initialized = true;
4127 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4128 sw_context->ctx = &val_ctx;
4129 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4130 if (unlikely(ret != 0))
4133 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4135 if (unlikely(ret != 0))
4138 ret = vmw_resources_reserve(sw_context);
4139 if (unlikely(ret != 0))
4142 ret = vmw_validation_bo_reserve(&val_ctx, true);
4143 if (unlikely(ret != 0))
4146 ret = vmw_validation_bo_validate(&val_ctx, true);
4147 if (unlikely(ret != 0))
4150 ret = vmw_validation_res_validate(&val_ctx, true);
4151 if (unlikely(ret != 0))
4154 vmw_validation_drop_ht(&val_ctx);
4156 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4157 if (unlikely(ret != 0)) {
4162 if (dev_priv->has_mob) {
4163 ret = vmw_rebind_contexts(sw_context);
4164 if (unlikely(ret != 0))
4165 goto out_unlock_binding;
4169 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4170 command_size, sw_context);
4172 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4176 mutex_unlock(&dev_priv->binding_mutex);
4180 vmw_query_bo_switch_commit(dev_priv, sw_context);
4181 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4182 (user_fence_rep) ? &handle : NULL);
4184 * This error is harmless, because if fence submission fails,
4185 * vmw_fifo_send_fence will sync. The error will be propagated to
4186 * user-space in @fence_rep
4189 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4191 vmw_execbuf_bindings_commit(sw_context, false);
4192 vmw_bind_dx_query_mob(sw_context);
4193 vmw_validation_res_unreserve(&val_ctx, false);
4195 vmw_validation_bo_fence(sw_context->ctx, fence);
4197 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4198 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4201 * If anything fails here, give up trying to export the fence and do a
4202 * sync since the user mode will not be able to sync the fence itself.
4203 * This ensures we are still functionally correct.
4205 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4207 sync_file = sync_file_create(&fence->base);
4209 VMW_DEBUG_USER("Sync file create failed for fence\n");
4210 put_unused_fd(out_fence_fd);
4213 (void) vmw_fence_obj_wait(fence, false, false,
4214 VMW_FENCE_WAIT_TIMEOUT);
4216 /* Link the fence with the FD created earlier */
4217 fd_install(out_fence_fd, sync_file->file);
4221 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4222 user_fence_rep, fence, handle, out_fence_fd,
4225 /* Don't unreference when handing fence out */
4226 if (unlikely(out_fence != NULL)) {
4229 } else if (likely(fence != NULL)) {
4230 vmw_fence_obj_unreference(&fence);
4233 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4234 mutex_unlock(&dev_priv->cmdbuf_mutex);
4237 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4238 * in resource destruction paths.
4240 vmw_validation_unref_lists(&val_ctx);
4245 mutex_unlock(&dev_priv->binding_mutex);
4247 vmw_validation_bo_backoff(&val_ctx);
4249 vmw_execbuf_bindings_commit(sw_context, true);
4250 vmw_validation_res_unreserve(&val_ctx, true);
4251 vmw_resource_relocations_free(&sw_context->res_relocations);
4252 vmw_free_relocations(sw_context);
4253 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4254 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4256 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4257 vmw_validation_drop_ht(&val_ctx);
4258 WARN_ON(!list_empty(&sw_context->ctx_list));
4259 mutex_unlock(&dev_priv->cmdbuf_mutex);
4262 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4263 * in resource destruction paths.
4265 vmw_validation_unref_lists(&val_ctx);
4268 vmw_cmdbuf_header_free(header);
4270 if (out_fence_fd >= 0)
4271 put_unused_fd(out_fence_fd);
4277 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4279 * @dev_priv: The device private structure.
4281 * This function is called to idle the fifo and unpin the query buffer if the
4282 * normal way to do this hits an error, which should typically be extremely
4285 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4287 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4289 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4290 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4291 if (dev_priv->dummy_query_bo_pinned) {
4292 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4293 dev_priv->dummy_query_bo_pinned = false;
4299 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4302 * @dev_priv: The device private structure.
4303 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4304 * query barrier that flushes all queries touching the current buffer pointed to
4305 * by @dev_priv->pinned_bo
4307 * This function should be used to unpin the pinned query bo, or as a query
4308 * barrier when we need to make sure that all queries have finished before the
4309 * next fifo command. (For example on hardware context destructions where the
4310 * hardware may otherwise leak unfinished queries).
4312 * This function does not return any failure codes, but make attempts to do safe
4313 * unpinning in case of errors.
4315 * The function will synchronize on the previous query barrier, and will thus
4316 * not finish until that barrier has executed.
4318 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4319 * calling this function.
4321 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4322 struct vmw_fence_obj *fence)
4325 struct vmw_fence_obj *lfence = NULL;
4326 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4328 if (dev_priv->pinned_bo == NULL)
4331 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4334 goto out_no_reserve;
4336 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4339 goto out_no_reserve;
4341 ret = vmw_validation_bo_reserve(&val_ctx, false);
4343 goto out_no_reserve;
4345 if (dev_priv->query_cid_valid) {
4346 BUG_ON(fence != NULL);
4347 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4350 dev_priv->query_cid_valid = false;
4353 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4354 if (dev_priv->dummy_query_bo_pinned) {
4355 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4356 dev_priv->dummy_query_bo_pinned = false;
4358 if (fence == NULL) {
4359 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4363 vmw_validation_bo_fence(&val_ctx, fence);
4365 vmw_fence_obj_unreference(&lfence);
4367 vmw_validation_unref_lists(&val_ctx);
4368 vmw_bo_unreference(&dev_priv->pinned_bo);
4373 vmw_validation_bo_backoff(&val_ctx);
4375 vmw_validation_unref_lists(&val_ctx);
4376 vmw_execbuf_unpin_panic(dev_priv);
4377 vmw_bo_unreference(&dev_priv->pinned_bo);
4381 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4383 * @dev_priv: The device private structure.
4385 * This function should be used to unpin the pinned query bo, or as a query
4386 * barrier when we need to make sure that all queries have finished before the
4387 * next fifo command. (For example on hardware context destructions where the
4388 * hardware may otherwise leak unfinished queries).
4390 * This function does not return any failure codes, but make attempts to do safe
4391 * unpinning in case of errors.
4393 * The function will synchronize on the previous query barrier, and will thus
4394 * not finish until that barrier has executed.
4396 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4398 mutex_lock(&dev_priv->cmdbuf_mutex);
4399 if (dev_priv->query_cid_valid)
4400 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4401 mutex_unlock(&dev_priv->cmdbuf_mutex);
4404 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4405 struct drm_file *file_priv)
4407 struct vmw_private *dev_priv = vmw_priv(dev);
4408 struct drm_vmw_execbuf_arg *arg = data;
4410 struct dma_fence *in_fence = NULL;
4412 MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4413 MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4416 * Extend the ioctl argument while maintaining backwards compatibility:
4417 * We take different code paths depending on the value of arg->version.
4419 * Note: The ioctl argument is extended and zeropadded by core DRM.
4421 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4422 arg->version == 0)) {
4423 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4428 switch (arg->version) {
4430 /* For v1 core DRM have extended + zeropadded the data */
4431 arg->context_handle = (uint32_t) -1;
4435 /* For v2 and later core DRM would have correctly copied it */
4439 /* If imported a fence FD from elsewhere, then wait on it */
4440 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4441 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4444 VMW_DEBUG_USER("Cannot get imported fence\n");
4449 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4454 ret = vmw_execbuf_process(file_priv, dev_priv,
4455 (void __user *)(unsigned long)arg->commands,
4456 NULL, arg->command_size, arg->throttle_us,
4457 arg->context_handle,
4458 (void __user *)(unsigned long)arg->fence_rep,
4461 if (unlikely(ret != 0))
4464 vmw_kms_cursor_post_execbuf(dev_priv);
4468 dma_fence_put(in_fence);
4471 MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);