// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>

/*
 * Helper macro to get the dx_ctx_node if available; otherwise print an
 * error message. This is for use in command verifier functions where the
 * command is invalid if dx_ctx_node is not set.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
        __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
                VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
                __sw_context->dx_ctx_node;                                    \
        });                                                                   \
})

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
        struct {                                                              \
                SVGA3dCmdHeader header;                                       \
                __type body;                                                  \
        } __var

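/*
 * Illustrative sketch (comment only, not compiled): the command verifiers
 * below pair VMW_DECLARE_CMD_VAR with container_of() to recover the full
 * command from its embedded header:
 *
 *      VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *      cmd = container_of(header, typeof(*cmd), header);
 *
 * after which cmd->header is the SVGA3dCmdHeader handed to the verifier
 * and cmd->body is the typed command payload.
 */
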
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
        struct list_head head;
        struct vmw_bo *vbo;
        union {
                SVGAMobId *mob_loc;
                SVGAGuestPtr *location;
        };
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
        vmw_res_rel_normal,
        vmw_res_rel_nop,
        vmw_res_rel_cond_nop,
        vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        u32 offset:29;
        enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
        struct list_head head;
        struct vmw_resource *ctx;
        struct vmw_ctx_binding_state *cur;
        struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
        const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable), #_cmd}
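
/*
 * Illustrative sketch (comment only): VMW_CMD_DEF entries populate the
 * driver's command table, indexed by (command id - SVGA_3D_CMD_BASE),
 * along the lines of:
 *
 *      VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *                  true, false, false),
 *
 * i.e. allow the command from the execbuf ioctl and dispatch it to
 * vmw_cmd_surface_copy_check(). The actual per-command flags live in the
 * table further down in this file.
 */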

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_bo **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
        return (unsigned long) b - (unsigned long) a;
}
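
/*
 * Example (comment only), taken from vmw_cmd_res_check() below: the byte
 * offset of a resource id inside the command buffer being parsed is
 * computed as vmw_ptr_diff(sw_context->buf_start, id_loc) and recorded in
 * the relocation list for later fixup.
 */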

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
                                        bool backoff)
{
        struct vmw_ctx_validation_info *entry;

        list_for_each_entry(entry, &sw_context->ctx_list, head) {
                if (!backoff)
                        vmw_binding_state_commit(entry->cur, entry->staged);

                if (entry->staged != sw_context->staged_bindings)
                        vmw_binding_state_free(entry->staged);
                else
                        sw_context->staged_bindings_inuse = false;
        }

        /* List entries are freed with the validation context */
        INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
        if (sw_context->dx_query_mob)
                vmw_context_bind_dx_query(sw_context->dx_query_ctx,
                                          sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   struct vmw_ctx_validation_info *node)
{
        int ret;

        ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
        if (unlikely(ret != 0))
                goto out_err;

        if (!sw_context->staged_bindings) {
                sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(sw_context->staged_bindings)) {
                        ret = PTR_ERR(sw_context->staged_bindings);
                        sw_context->staged_bindings = NULL;
                        goto out_err;
                }
        }

        if (sw_context->staged_bindings_inuse) {
                node->staged = vmw_binding_state_alloc(dev_priv);
                if (IS_ERR(node->staged)) {
                        ret = PTR_ERR(node->staged);
                        node->staged = NULL;
                        goto out_err;
                }
        } else {
                node->staged = sw_context->staged_bindings;
                sw_context->staged_bindings_inuse = true;
        }

        node->ctx = res;
        node->cur = vmw_context_binding_state(res);
        list_add_tail(&node->head, &sw_context->ctx_list);

        return 0;

out_err:
        return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
                                         enum vmw_res_type res_type)
{
        return (res_type == vmw_res_dx_context ||
                (res_type == vmw_res_context && dev_priv->has_mob)) ?
                sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
                                      struct vmw_resource *res,
                                      void *private)
{
        rcache->res = res;
        rcache->private = private;
        rcache->valid = 1;
        rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
        vmw_val_add_flag_none  =      0,
        vmw_val_add_flag_noctx = 1 << 0,
};
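
/*
 * Illustrative sketch (comment only): callers that already know the
 * resource carries no execbuf-private context data pass
 * vmw_val_add_flag_noctx, as vmw_view_res_val_add() does below:
 *
 *      ret = vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
 *                                    vmw_val_add_flag_noctx);
 *
 * while vmw_cmd_res_check() passes vmw_val_add_flag_none so that
 * first-usage context setup can run.
 */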

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
                                   struct vmw_resource *res,
                                   u32 dirty,
                                   u32 flags)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        enum vmw_res_type res_type = vmw_res_type(res);
        struct vmw_res_cache_entry *rcache;
        struct vmw_ctx_validation_info *ctx_info;
        bool first_usage;
        unsigned int priv_size;

        rcache = &sw_context->res_cache[res_type];
        if (likely(rcache->valid && rcache->res == res)) {
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
                return 0;
        }

        if ((flags & vmw_val_add_flag_noctx) != 0) {
                ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
                                                  (void **)&ctx_info, NULL);
                if (ret)
                        return ret;

        } else {
                priv_size = vmw_execbuf_res_size(dev_priv, res_type);
                ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
                                                  dirty, (void **)&ctx_info,
                                                  &first_usage);
                if (ret)
                        return ret;

                if (priv_size && first_usage) {
                        ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                                      ctx_info);
                        if (ret) {
                                VMW_DEBUG_USER("Failed first usage context setup.\n");
                                return ret;
                        }
                }
        }

        vmw_execbuf_rcache_update(rcache, res, ctx_info);
        return 0;
}

/**
 * vmw_view_res_val_add - Add a view, and the surface it points to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *view)
{
        int ret;

        /*
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
        ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
                                      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
        if (ret)
                return ret;

        return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
                                       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it points
 * to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
                    enum vmw_view_type view_type, u32 id)
{
        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
        struct vmw_resource *view;
        int ret;

        if (!ctx_node)
                return ERR_PTR(-EINVAL);

        view = vmw_view_lookup(sw_context->man, view_type, id);
        if (IS_ERR(view))
                return view;

        ret = vmw_view_res_val_add(sw_context, view);
        if (ret)
                return ERR_PTR(ret);

        return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                        struct vmw_sw_context *sw_context,
                                        struct vmw_resource *ctx)
{
        struct list_head *binding_list;
        struct vmw_ctx_bindinfo *entry;
        int ret = 0;
        struct vmw_resource *res;
        u32 i;
        u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
                SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

        /* Add all cotables to the validation list. */
        if (has_sm4_context(dev_priv) &&
            vmw_res_type(ctx) == vmw_res_dx_context) {
                for (i = 0; i < cotable_max; ++i) {
                        res = vmw_context_cotable(ctx, i);
                        if (IS_ERR_OR_NULL(res))
                                continue;

                        ret = vmw_execbuf_res_val_add(sw_context, res,
                                                      VMW_RES_DIRTY_SET,
                                                      vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
                }
        }

        /* Add all resources bound to the context to the validation list */
        mutex_lock(&dev_priv->binding_mutex);
        binding_list = vmw_context_binding_list(ctx);

        list_for_each_entry(entry, binding_list, ctx_list) {
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
                        ret = vmw_execbuf_res_val_add(sw_context, entry->res,
                                                      vmw_binding_dirtying(entry->bt),
                                                      vmw_val_add_flag_noctx);
                if (unlikely(ret != 0))
                        break;
        }

        if (has_sm4_context(dev_priv) &&
            vmw_res_type(ctx) == vmw_res_dx_context) {
                struct vmw_bo *dx_query_mob;

                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob) {
                        vmw_bo_placement_set(dx_query_mob,
                                             VMW_BO_DOMAIN_MOB,
                                             VMW_BO_DOMAIN_MOB);
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    dx_query_mob);
                }
        }

        mutex_unlock(&dev_priv->binding_mutex);
        return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
                                       const struct vmw_resource *res,
                                       unsigned long offset,
                                       enum vmw_resource_relocation_type
                                       rel_type)
{
        struct vmw_resource_relocation *rel;

        rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
        if (unlikely(!rel)) {
                VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        rel->rel_type = rel_type;
        list_add_tail(&rel->head, &sw_context->res_relocations);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        /* Memory is validation context memory, so no need to free it */
        INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

        /* Validate the struct vmw_resource_relocation member size */
        BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
        BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

        list_for_each_entry(rel, list, head) {
                u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
                switch (rel->rel_type) {
                case vmw_res_rel_normal:
                        *addr = rel->res->id;
                        break;
                case vmw_res_rel_nop:
                        *addr = SVGA_3D_CMD_NOP;
                        break;
                default:
                        if (rel->res->id == -1)
                                *addr = SVGA_3D_CMD_NOP;
                        break;
                }
        }
}
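
/*
 * Illustrative flow (comment only): during parsing a verifier records a
 * fixup with
 *
 *      vmw_resource_relocation_add(sw_context, res,
 *                                  vmw_ptr_diff(sw_context->buf_start, id_loc),
 *                                  vmw_res_rel_normal);
 *
 * and once validation has assigned device ids,
 * vmw_resource_relocations_apply() patches the id into the command buffer
 * copy at the recorded byte offset.
 */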

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        int ret;

        ret = vmw_validation_res_reserve(sw_context->ctx, true);
        if (ret)
                return ret;

        if (sw_context->dx_query_mob) {
                struct vmw_bo *expected_dx_query_mob;

                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
                if (expected_dx_query_mob &&
                    expected_dx_query_mob != sw_context->dx_query_mob) {
                        ret = -EINVAL;
                }
        }

        return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to the resource. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
                  struct vmw_sw_context *sw_context,
                  enum vmw_res_type res_type,
                  u32 dirty,
                  const struct vmw_user_resource_conv *converter,
                  uint32_t *id_loc,
                  struct vmw_resource **p_res)
{
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        int ret = 0;
        bool needs_unref = false;

        if (p_res)
                *p_res = NULL;

        if (*id_loc == SVGA3D_INVALID_ID) {
                if (res_type == vmw_res_context) {
                        VMW_DEBUG_USER("Illegal context invalid id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
                res = rcache->res;
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
        } else {
                unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

                ret = vmw_validation_preload_res(sw_context->ctx, size);
                if (ret)
                        return ret;

                ret = vmw_user_resource_lookup_handle
                        (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
                if (ret != 0) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
                        return ret;
                }
                needs_unref = true;

                ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
                if (unlikely(ret != 0))
                        goto res_check_done;

                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
                        rcache->handle = *id_loc;
                }
        }

        ret = vmw_resource_relocation_add(sw_context, res,
                                          vmw_ptr_diff(sw_context->buf_start,
                                                       id_loc),
                                          vmw_res_rel_normal);
        if (p_res)
                *p_res = res;

res_check_done:
        if (needs_unref)
                vmw_resource_unreference(&res);

        return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
        struct vmw_private *dev_priv = ctx_res->dev_priv;
        struct vmw_bo *dx_query_mob;
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

        dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

        if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                return 0;

        cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
        if (cmd == NULL)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = ctx_res->id;
        cmd->body.mobid = dx_query_mob->tbo.resource->start;
        vmw_cmd_commit(dev_priv, sizeof(*cmd));

        vmw_context_bind_dx_query(ctx_res, dx_query_mob);

        return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
        struct vmw_ctx_validation_info *val;
        int ret;

        list_for_each_entry(val, &sw_context->ctx_list, head) {
                ret = vmw_binding_rebind_all(val->cur);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                VMW_DEBUG_USER("Failed to rebind context.\n");
                        return ret;
                }

                ret = vmw_rebind_all_dx_query(val->ctx);
                if (ret != 0) {
                        VMW_DEBUG_USER("Failed to rebind queries.\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                 enum vmw_view_type view_type,
                                 enum vmw_ctx_binding_type binding_type,
                                 uint32 shader_slot,
                                 uint32 view_ids[], u32 num_views,
                                 u32 first_slot)
{
        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
        u32 i;

        if (!ctx_node)
                return -EINVAL;

        for (i = 0; i < num_views; ++i) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_resource *view = NULL;

                if (view_ids[i] != SVGA3D_INVALID_ID) {
                        view = vmw_view_id_val_add(sw_context, view_type,
                                                   view_ids[i]);
                        if (IS_ERR(view)) {
                                VMW_DEBUG_USER("View not found.\n");
                                return PTR_ERR(view);
                        }
                }
                binding.bi.ctx = ctx_node->ctx;
                binding.bi.res = view;
                binding.bi.bt = binding_type;
                binding.shader_slot = shader_slot;
                binding.slot = first_slot + i;
                vmw_binding_add(ctx_node->staged, &binding.bi,
                                shader_slot, binding.slot);
        }

        return 0;
}
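
/*
 * Illustrative sketch (comment only): the DX shader-resource verifiers
 * further down in this file feed the view id array that trails the command
 * body straight into this helper, one binding slot per id, along the lines
 * of:
 *
 *      vmw_view_bindings_add(sw_context, vmw_view_sr, vmw_ctx_binding_sr,
 *                            shader_slot, (void *) &cmd[1], num_views,
 *                            cmd->body.startView);
 */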

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
                          struct vmw_resource *res)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[vmw_res_type(res)];

        if (rcache->valid && rcache->res == res)
                return rcache->private;

        WARN_ON_ONCE(true);
        return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
        struct vmw_resource *ctx;
        struct vmw_resource *res;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        if (cmd->body.type >= SVGA3D_RT_MAX) {
                VMW_DEBUG_USER("Illegal render target type %u.\n",
                               (unsigned int) cmd->body.type);
                return -EINVAL;
        }

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                VMW_RES_DIRTY_SET, user_context_converter,
                                &cmd->body.cid, &ctx);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_SET, user_surface_converter,
                                &cmd->body.target.sid, &res);
        if (unlikely(ret))
                return ret;

        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo_view binding;
                struct vmw_ctx_validation_info *node;

                node = vmw_execbuf_info_from_res(sw_context, ctx);
                if (!node)
                        return -EINVAL;

                binding.bi.ctx = ctx;
                binding.bi.res = res;
                binding.bi.bt = vmw_ctx_binding_rt;
                binding.slot = cmd->body.type;
                vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (ret)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.srcSid, NULL);
        if (ret != 0)
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
        int ret;

        cmd = container_of(header, typeof(*cmd), header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct vmw_bo *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
                        VMW_DEBUG_USER("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
                        ret = vmw_validation_add_bo(sw_context->ctx,
                                                    sw_context->cur_query_bo);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
                ret = vmw_validation_add_bo(sw_context->ctx,
                                            dev_priv->dummy_query_bo);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
                        vmw_bo_unreference(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting dummy queries
                         * in context destroy paths.
                         */
                        if (!dev_priv->dummy_query_bo_pinned) {
                                vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                                    true);
                                dev_priv->dummy_query_bo_pinned = true;
                        }

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_bo **vmw_bo_p)
{
        struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        vmw_validation_preload_bo(sw_context->ctx);
        ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
        if (ret != 0) {
                drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
                return ret;
        }
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
        tmp_bo = vmw_bo;
        vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;

        reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
        if (!reloc)
                return -ENOMEM;

        reloc->mob_loc = id;
        reloc->vbo = vmw_bo;

        *vmw_bo_p = vmw_bo;
        list_add_tail(&reloc->head, &sw_context->bo_relocations);

        return 0;
}
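
/*
 * Illustrative sketch (comment only): vmw_cmd_dx_bind_query() below uses
 * this helper as
 *
 *      ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *                                  &vmw_bo);
 *
 * which leaves reloc->mob_loc pointing at cmd->body.mobid so that the
 * kernel-side MOB id can be patched in once the buffer has been validated.
 */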

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_bo **vmw_bo_p)
{
        struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        vmw_validation_preload_bo(sw_context->ctx);
        ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
        if (ret != 0) {
                drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
                return ret;
        }
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
        tmp_bo = vmw_bo;
        vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;

        reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
        if (!reloc)
                return -ENOMEM;

        reloc->location = ptr;
        reloc->vbo = vmw_bo;
        *vmw_bo_p = vmw_bo;
        list_add_tail(&reloc->head, &sw_context->bo_relocations);

        return 0;
}
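
/*
 * Illustrative sketch (comment only): the legacy (non guest-backed) query
 * path uses this helper, e.g. vmw_cmd_end_query() below:
 *
 *      ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *                                    &cmd->body.guestResult, &vmw_bo);
 *
 * so that reloc->location points at the SVGAGuestPtr to be rewritten when
 * the relocations are applied.
 */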

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
        struct vmw_resource *cotable_res;
        int ret;

        if (!ctx_node)
                return -EINVAL;

        cmd = container_of(header, typeof(*cmd), header);

        if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
            cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
                return -EINVAL;

        cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
        if (IS_ERR_OR_NULL(cotable_res))
                return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
        ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

        return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB.  In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
        struct vmw_bo *vmw_bo;
        int ret;

        cmd = container_of(header, typeof(*cmd), header);

        /*
         * Look up the buffer pointed to by q.mobid, put it on the relocation
         * list so its kernel mode MOB ID can be filled in later
         */
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
                                    &vmw_bo);

        if (ret != 0)
                return ret;

        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
        return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
                container_of(header, typeof(*cmd), header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
                container_of(header, typeof(*cmd), header);

        if (unlikely(dev_priv->has_mob)) {
                VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.body.cid = cmd->body.cid;
                gb_cmd.body.type = cmd->body.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 VMW_RES_DIRTY_SET, user_context_converter,
                                 &cmd->body.cid, NULL);
}
1364
1365 /**
1366  * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1367  *
1368  * @dev_priv: Pointer to a device private struct.
1369  * @sw_context: The software context used for this command submission.
1370  * @header: Pointer to the command header in the command stream.
1371  */
1372 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1373                                 struct vmw_sw_context *sw_context,
1374                                 SVGA3dCmdHeader *header)
1375 {
1376         struct vmw_bo *vmw_bo;
1377         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1378         int ret;
1379
1380         cmd = container_of(header, typeof(*cmd), header);
1381         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1382         if (unlikely(ret != 0))
1383                 return ret;
1384
1385         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1386                                     &vmw_bo);
1387         if (unlikely(ret != 0))
1388                 return ret;
1389
1390         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1391
1392         return ret;
1393 }
1394
1395 /**
1396  * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1397  *
1398  * @dev_priv: Pointer to a device private struct.
1399  * @sw_context: The software context used for this command submission.
1400  * @header: Pointer to the command header in the command stream.
1401  */
1402 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1403                              struct vmw_sw_context *sw_context,
1404                              SVGA3dCmdHeader *header)
1405 {
1406         struct vmw_bo *vmw_bo;
1407         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1408         int ret;
1409
1410         cmd = container_of(header, typeof(*cmd), header);
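        /* On MOB devices, rewrite in place to the END_GB_QUERY form, as above. */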
1411         if (dev_priv->has_mob) {
1412                 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1413
1414                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1415
1416                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1417                 gb_cmd.header.size = cmd->header.size;
1418                 gb_cmd.body.cid = cmd->body.cid;
1419                 gb_cmd.body.type = cmd->body.type;
1420                 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1421                 gb_cmd.body.offset = cmd->body.guestResult.offset;
1422
1423                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1424                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1425         }
1426
1427         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1428         if (unlikely(ret != 0))
1429                 return ret;
1430
1431         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1432                                       &cmd->body.guestResult, &vmw_bo);
1433         if (unlikely(ret != 0))
1434                 return ret;
1435
1436         ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1437
1438         return ret;
1439 }
1440
1441 /**
1442  * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1443  *
1444  * @dev_priv: Pointer to a device private struct.
1445  * @sw_context: The software context used for this command submission.
1446  * @header: Pointer to the command header in the command stream.
1447  */
1448 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1449                                  struct vmw_sw_context *sw_context,
1450                                  SVGA3dCmdHeader *header)
1451 {
1452         struct vmw_bo *vmw_bo;
1453         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1454         int ret;
1455
1456         cmd = container_of(header, typeof(*cmd), header);
1457         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1458         if (unlikely(ret != 0))
1459                 return ret;
1460
1461         ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1462                                     &vmw_bo);
1463         if (unlikely(ret != 0))
1464                 return ret;
1465
1466         return 0;
1467 }
1468
1469 /**
1470  * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1471  *
1472  * @dev_priv: Pointer to a device private struct.
1473  * @sw_context: The software context used for this command submission.
1474  * @header: Pointer to the command header in the command stream.
1475  */
1476 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1477                               struct vmw_sw_context *sw_context,
1478                               SVGA3dCmdHeader *header)
1479 {
1480         struct vmw_bo *vmw_bo;
1481         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1482         int ret;
1483
1484         cmd = container_of(header, typeof(*cmd), header);
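        /* On MOB devices, rewrite in place to the WAIT_FOR_GB_QUERY form. */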
1485         if (dev_priv->has_mob) {
1486                 VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1487
1488                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1489
1490                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1491                 gb_cmd.header.size = cmd->header.size;
1492                 gb_cmd.body.cid = cmd->body.cid;
1493                 gb_cmd.body.type = cmd->body.type;
1494                 gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1495                 gb_cmd.body.offset = cmd->body.guestResult.offset;
1496
1497                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1498                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1499         }
1500
1501         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1502         if (unlikely(ret != 0))
1503                 return ret;
1504
1505         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1506                                       &cmd->body.guestResult, &vmw_bo);
1507         if (unlikely(ret != 0))
1508                 return ret;
1509
1510         return 0;
1511 }
1512
1513 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1514                        struct vmw_sw_context *sw_context,
1515                        SVGA3dCmdHeader *header)
1516 {
1517         struct vmw_bo *vmw_bo = NULL;
1518         struct vmw_surface *srf = NULL;
1519         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1520         int ret;
1521         SVGA3dCmdSurfaceDMASuffix *suffix;
1522         uint32_t bo_size;
1523         bool dirty;
1524
1525         cmd = container_of(header, typeof(*cmd), header);
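        /*
         * The DMA suffix sits at the very end of the command body; locate it
         * using the size reported in the command header.
         */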
1526         suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1527                                                header->size - sizeof(*suffix));
1528
1529         /* Make sure the device and verifier stay in sync. */
1530         if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1531                 VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1532                 return -EINVAL;
1533         }
1534
1535         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1536                                       &cmd->body.guest.ptr, &vmw_bo);
1537         if (unlikely(ret != 0))
1538                 return ret;
1539
1540         /* Make sure DMA doesn't cross BO boundaries. */
1541         bo_size = vmw_bo->tbo.base.size;
1542         if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1543                 VMW_DEBUG_USER("Invalid DMA offset.\n");
1544                 return -EINVAL;
1545         }
1546
1547         bo_size -= cmd->body.guest.ptr.offset;
1548         if (unlikely(suffix->maximumOffset > bo_size))
1549                 suffix->maximumOffset = bo_size;
1550
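        /* Only transfers that write to the surface mark the resource dirty. */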
1551         dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1552                 VMW_RES_DIRTY_SET : 0;
1553         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1554                                 dirty, user_surface_converter,
1555                                 &cmd->body.host.sid, NULL);
1556         if (unlikely(ret != 0)) {
1557                 if (unlikely(ret != -ERESTARTSYS))
1558                         VMW_DEBUG_USER("could not find surface for DMA.\n");
1559                 return ret;
1560         }
1561
1562         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1563
1564         vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1565
1566         return 0;
1567 }
1568
1569 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1570                         struct vmw_sw_context *sw_context,
1571                         SVGA3dCmdHeader *header)
1572 {
1573         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1574         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1575                 (unsigned long)header + sizeof(*cmd));
1576         SVGA3dPrimitiveRange *range;
1577         uint32_t i;
1578         uint32_t maxnum;
1579         int ret;
1580
1581         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1582         if (unlikely(ret != 0))
1583                 return ret;
1584
1585         cmd = container_of(header, typeof(*cmd), header);
1586         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1587
1588         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1589                 VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1590                 return -EINVAL;
1591         }
1592
1593         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1594                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1595                                         VMW_RES_DIRTY_NONE,
1596                                         user_surface_converter,
1597                                         &decl->array.surfaceId, NULL);
1598                 if (unlikely(ret != 0))
1599                         return ret;
1600         }
1601
1602         maxnum = (header->size - sizeof(cmd->body) -
1603                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1604         if (unlikely(cmd->body.numRanges > maxnum)) {
1605                 VMW_DEBUG_USER("Illegal number of index ranges.\n");
1606                 return -EINVAL;
1607         }
1608
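        /* The primitive ranges immediately follow the vertex declarations. */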
1609         range = (SVGA3dPrimitiveRange *) decl;
1610         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1611                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1612                                         VMW_RES_DIRTY_NONE,
1613                                         user_surface_converter,
1614                                         &range->indexArray.surfaceId, NULL);
1615                 if (unlikely(ret != 0))
1616                         return ret;
1617         }
1618         return 0;
1619 }
1620
1621 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1622                              struct vmw_sw_context *sw_context,
1623                              SVGA3dCmdHeader *header)
1624 {
1625         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1626         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1627           ((unsigned long) header + header->size + sizeof(*header));
1628         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1629                 ((unsigned long) header + sizeof(*cmd));
1630         struct vmw_resource *ctx;
1631         struct vmw_resource *res;
1632         int ret;
1633
1634         cmd = container_of(header, typeof(*cmd), header);
1635
1636         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1637                                 VMW_RES_DIRTY_SET, user_context_converter,
1638                                 &cmd->body.cid, &ctx);
1639         if (unlikely(ret != 0))
1640                 return ret;
1641
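        /*
         * Walk all texture states trailing the command body; only
         * SVGA3D_TS_BIND_TEXTURE entries reference a surface that needs
         * validation and, on MOB devices, a context binding.
         */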
1642         for (; cur_state < last_state; ++cur_state) {
1643                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1644                         continue;
1645
1646                 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1647                         VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1648                                        (unsigned int) cur_state->stage);
1649                         return -EINVAL;
1650                 }
1651
1652                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1653                                         VMW_RES_DIRTY_NONE,
1654                                         user_surface_converter,
1655                                         &cur_state->value, &res);
1656                 if (unlikely(ret != 0))
1657                         return ret;
1658
1659                 if (dev_priv->has_mob) {
1660                         struct vmw_ctx_bindinfo_tex binding;
1661                         struct vmw_ctx_validation_info *node;
1662
1663                         node = vmw_execbuf_info_from_res(sw_context, ctx);
1664                         if (!node)
1665                                 return -EINVAL;
1666
1667                         binding.bi.ctx = ctx;
1668                         binding.bi.res = res;
1669                         binding.bi.bt = vmw_ctx_binding_tex;
1670                         binding.texture_stage = cur_state->stage;
1671                         vmw_binding_add(node->staged, &binding.bi, 0,
1672                                         binding.texture_stage);
1673                 }
1674         }
1675
1676         return 0;
1677 }
1678
1679 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1680                                       struct vmw_sw_context *sw_context,
1681                                       void *buf)
1682 {
1683         struct vmw_bo *vmw_bo;
1684
1685         struct {
1686                 uint32_t header;
1687                 SVGAFifoCmdDefineGMRFB body;
1688         } *cmd = buf;
1689
1690         return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1691                                        &vmw_bo);
1692 }
1693
1694 /**
1695  * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1696  * switching
1697  *
1698  * @dev_priv: Pointer to a device private struct.
1699  * @sw_context: The software context being used for this batch.
1700  * @res: Pointer to the resource.
1701  * @buf_id: Pointer to the user-space backup buffer handle in the command
1702  * stream.
1703  * @backup_offset: Offset of backup into MOB.
1704  *
1705  * This function prepares for registering a switch of backup buffers in the
1706  * resource metadata just prior to unreserving, after translating the
1707  * user-space backup buffer handle into a validated buffer object.
1708  */
1709 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1710                                      struct vmw_sw_context *sw_context,
1711                                      struct vmw_resource *res, uint32_t *buf_id,
1712                                      unsigned long backup_offset)
1713 {
1714         struct vmw_bo *vbo;
1715         void *info;
1716         int ret;
1717
1718         info = vmw_execbuf_info_from_res(sw_context, res);
1719         if (!info)
1720                 return -EINVAL;
1721
1722         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1723         if (ret)
1724                 return ret;
1725
1726         vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1727                                          backup_offset);
1728         return 0;
1729 }
1730
1731 /**
1732  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1733  *
1734  * @dev_priv: Pointer to a device private struct.
1735  * @sw_context: The software context being used for this batch.
1736  * @res_type: The resource type.
1737  * @converter: Information about user-space binding for this resource type.
1738  * @res_id: Pointer to the user-space resource handle in the command stream.
1739  * @buf_id: Pointer to the user-space backup buffer handle in the command
1740  * stream.
1741  * @backup_offset: Offset of backup into MOB.
1742  *
1743  * This function prepares for registering a switch of backup buffers in the
1744  * resource metadata just prior to unreserving. It's basically a wrapper around
1745  * vmw_cmd_res_switch_backup with a different interface.
1746  */
1747 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1748                                  struct vmw_sw_context *sw_context,
1749                                  enum vmw_res_type res_type,
1750                                  const struct vmw_user_resource_conv
1751                                  *converter, uint32_t *res_id, uint32_t *buf_id,
1752                                  unsigned long backup_offset)
1753 {
1754         struct vmw_resource *res;
1755         int ret;
1756
1757         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1758                                 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1759         if (ret)
1760                 return ret;
1761
1762         return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1763                                          backup_offset);
1764 }
1765
1766 /**
1767  * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1768  *
1769  * @dev_priv: Pointer to a device private struct.
1770  * @sw_context: The software context being used for this batch.
1771  * @header: Pointer to the command header in the command stream.
1772  */
1773 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1774                                    struct vmw_sw_context *sw_context,
1775                                    SVGA3dCmdHeader *header)
1776 {
1777         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1778                 container_of(header, typeof(*cmd), header);
1779
1780         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1781                                      user_surface_converter, &cmd->body.sid,
1782                                      &cmd->body.mobid, 0);
1783 }
1784
1785 /**
1786  * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1787  *
1788  * @dev_priv: Pointer to a device private struct.
1789  * @sw_context: The software context being used for this batch.
1790  * @header: Pointer to the command header in the command stream.
1791  */
1792 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1793                                    struct vmw_sw_context *sw_context,
1794                                    SVGA3dCmdHeader *header)
1795 {
1796         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1797                 container_of(header, typeof(*cmd), header);
1798
1799         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1800                                  VMW_RES_DIRTY_NONE, user_surface_converter,
1801                                  &cmd->body.image.sid, NULL);
1802 }
1803
1804 /**
1805  * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1806  *
1807  * @dev_priv: Pointer to a device private struct.
1808  * @sw_context: The software context being used for this batch.
1809  * @header: Pointer to the command header in the command stream.
1810  */
1811 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1812                                      struct vmw_sw_context *sw_context,
1813                                      SVGA3dCmdHeader *header)
1814 {
1815         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1816                 container_of(header, typeof(*cmd), header);
1817
1818         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1819                                  VMW_RES_DIRTY_CLEAR, user_surface_converter,
1820                                  &cmd->body.sid, NULL);
1821 }
1822
1823 /**
1824  * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1825  *
1826  * @dev_priv: Pointer to a device private struct.
1827  * @sw_context: The software context being used for this batch.
1828  * @header: Pointer to the command header in the command stream.
1829  */
1830 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1831                                      struct vmw_sw_context *sw_context,
1832                                      SVGA3dCmdHeader *header)
1833 {
1834         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1835                 container_of(header, typeof(*cmd), header);
1836
1837         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1838                                  VMW_RES_DIRTY_NONE, user_surface_converter,
1839                                  &cmd->body.image.sid, NULL);
1840 }
1841
1842 /**
1843  * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1844  * command
1845  *
1846  * @dev_priv: Pointer to a device private struct.
1847  * @sw_context: The software context being used for this batch.
1848  * @header: Pointer to the command header in the command stream.
1849  */
1850 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1851                                        struct vmw_sw_context *sw_context,
1852                                        SVGA3dCmdHeader *header)
1853 {
1854         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1855                 container_of(header, typeof(*cmd), header);
1856
1857         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1858                                  VMW_RES_DIRTY_CLEAR, user_surface_converter,
1859                                  &cmd->body.sid, NULL);
1860 }
1861
1862 /**
1863  * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1864  * command
1865  *
1866  * @dev_priv: Pointer to a device private struct.
1867  * @sw_context: The software context being used for this batch.
1868  * @header: Pointer to the command header in the command stream.
1869  */
1870 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1871                                        struct vmw_sw_context *sw_context,
1872                                        SVGA3dCmdHeader *header)
1873 {
1874         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1875                 container_of(header, typeof(*cmd), header);
1876
1877         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1878                                  VMW_RES_DIRTY_NONE, user_surface_converter,
1879                                  &cmd->body.image.sid, NULL);
1880 }
1881
1882 /**
1883  * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1884  * command
1885  *
1886  * @dev_priv: Pointer to a device private struct.
1887  * @sw_context: The software context being used for this batch.
1888  * @header: Pointer to the command header in the command stream.
1889  */
1890 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1891                                          struct vmw_sw_context *sw_context,
1892                                          SVGA3dCmdHeader *header)
1893 {
1894         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1895                 container_of(header, typeof(*cmd), header);
1896
1897         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1898                                  VMW_RES_DIRTY_CLEAR, user_surface_converter,
1899                                  &cmd->body.sid, NULL);
1900 }
1901
1902 /**
1903  * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1904  *
1905  * @dev_priv: Pointer to a device private struct.
1906  * @sw_context: The software context being used for this batch.
1907  * @header: Pointer to the command header in the command stream.
1908  */
1909 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1910                                  struct vmw_sw_context *sw_context,
1911                                  SVGA3dCmdHeader *header)
1912 {
1913         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1914         int ret;
1915         size_t size;
1916         struct vmw_resource *ctx;
1917
1918         cmd = container_of(header, typeof(*cmd), header);
1919
1920         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1921                                 VMW_RES_DIRTY_SET, user_context_converter,
1922                                 &cmd->body.cid, &ctx);
1923         if (unlikely(ret != 0))
1924                 return ret;
1925
1926         if (unlikely(!dev_priv->has_mob))
1927                 return 0;
1928
1929         size = cmd->header.size - sizeof(cmd->body);
1930         ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1931                                     cmd->body.shid, cmd + 1, cmd->body.type,
1932                                     size, &sw_context->staged_cmd_res);
1933         if (unlikely(ret != 0))
1934                 return ret;
1935
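        /*
         * The shader is now managed by the kernel's compat shader code, so
         * turn the original SHADER_DEFINE command into a NOP to keep it from
         * reaching the device.
         */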
1936         return vmw_resource_relocation_add(sw_context, NULL,
1937                                            vmw_ptr_diff(sw_context->buf_start,
1938                                                         &cmd->header.id),
1939                                            vmw_res_rel_nop);
1940 }
1941
1942 /**
1943  * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1944  *
1945  * @dev_priv: Pointer to a device private struct.
1946  * @sw_context: The software context being used for this batch.
1947  * @header: Pointer to the command header in the command stream.
1948  */
1949 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1950                                   struct vmw_sw_context *sw_context,
1951                                   SVGA3dCmdHeader *header)
1952 {
1953         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1954         int ret;
1955         struct vmw_resource *ctx;
1956
1957         cmd = container_of(header, typeof(*cmd), header);
1958
1959         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1960                                 VMW_RES_DIRTY_SET, user_context_converter,
1961                                 &cmd->body.cid, &ctx);
1962         if (unlikely(ret != 0))
1963                 return ret;
1964
1965         if (unlikely(!dev_priv->has_mob))
1966                 return 0;
1967
1968         ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1969                                 cmd->body.type, &sw_context->staged_cmd_res);
1970         if (unlikely(ret != 0))
1971                 return ret;
1972
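        /* Likewise, NOP out the legacy SHADER_DESTROY command on MOB devices. */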
1973         return vmw_resource_relocation_add(sw_context, NULL,
1974                                            vmw_ptr_diff(sw_context->buf_start,
1975                                                         &cmd->header.id),
1976                                            vmw_res_rel_nop);
1977 }
1978
1979 /**
1980  * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1981  *
1982  * @dev_priv: Pointer to a device private struct.
1983  * @sw_context: The software context being used for this batch.
1984  * @header: Pointer to the command header in the command stream.
1985  */
1986 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1987                               struct vmw_sw_context *sw_context,
1988                               SVGA3dCmdHeader *header)
1989 {
1990         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1991         struct vmw_ctx_bindinfo_shader binding;
1992         struct vmw_resource *ctx, *res = NULL;
1993         struct vmw_ctx_validation_info *ctx_info;
1994         int ret;
1995
1996         cmd = container_of(header, typeof(*cmd), header);
1997
1998         if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1999                 VMW_DEBUG_USER("Illegal shader type %u.\n",
2000                                (unsigned int) cmd->body.type);
2001                 return -EINVAL;
2002         }
2003
2004         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2005                                 VMW_RES_DIRTY_SET, user_context_converter,
2006                                 &cmd->body.cid, &ctx);
2007         if (unlikely(ret != 0))
2008                 return ret;
2009
2010         if (!dev_priv->has_mob)
2011                 return 0;
2012
2013         if (cmd->body.shid != SVGA3D_INVALID_ID) {
2014                 /*
2015                  * This is the compat shader path: per-device guest-backed
2016                  * shaders that user-space believes are per-context
2017                  * host-backed shaders.
2018                  */
2019                 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2020                                         cmd->body.shid, cmd->body.type);
2021                 if (!IS_ERR(res)) {
2022                         ret = vmw_execbuf_res_val_add(sw_context, res,
2023                                                       VMW_RES_DIRTY_NONE,
2024                                                       vmw_val_add_flag_noctx);
2025                         if (unlikely(ret != 0))
2026                                 return ret;
2027
2028                         ret = vmw_resource_relocation_add
2029                                 (sw_context, res,
2030                                  vmw_ptr_diff(sw_context->buf_start,
2031                                               &cmd->body.shid),
2032                                  vmw_res_rel_normal);
2033                         if (unlikely(ret != 0))
2034                                 return ret;
2035                 }
2036         }
2037
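        /* Not a compat shader - fall back to a regular shader resource lookup. */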
2038         if (IS_ERR_OR_NULL(res)) {
2039                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2040                                         VMW_RES_DIRTY_NONE,
2041                                         user_shader_converter, &cmd->body.shid,
2042                                         &res);
2043                 if (unlikely(ret != 0))
2044                         return ret;
2045         }
2046
2047         ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2048         if (!ctx_info)
2049                 return -EINVAL;
2050
2051         binding.bi.ctx = ctx;
2052         binding.bi.res = res;
2053         binding.bi.bt = vmw_ctx_binding_shader;
2054         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2055         vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2056
2057         return 0;
2058 }
2059
2060 /**
2061  * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2062  *
2063  * @dev_priv: Pointer to a device private struct.
2064  * @sw_context: The software context being used for this batch.
2065  * @header: Pointer to the command header in the command stream.
2066  */
2067 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2068                                     struct vmw_sw_context *sw_context,
2069                                     SVGA3dCmdHeader *header)
2070 {
2071         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2072         int ret;
2073
2074         cmd = container_of(header, typeof(*cmd), header);
2075
2076         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2077                                 VMW_RES_DIRTY_SET, user_context_converter,
2078                                 &cmd->body.cid, NULL);
2079         if (unlikely(ret != 0))
2080                 return ret;
2081
2082         if (dev_priv->has_mob)
2083                 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2084
2085         return 0;
2086 }
2087
2088 /**
2089  * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2090  *
2091  * @dev_priv: Pointer to a device private struct.
2092  * @sw_context: The software context being used for this batch.
2093  * @header: Pointer to the command header in the command stream.
2094  */
2095 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2096                                   struct vmw_sw_context *sw_context,
2097                                   SVGA3dCmdHeader *header)
2098 {
2099         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2100                 container_of(header, typeof(*cmd), header);
2101
2102         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2103                                      user_shader_converter, &cmd->body.shid,
2104                                      &cmd->body.mobid, cmd->body.offsetInBytes);
2105 }
2106
2107 /**
2108  * vmw_cmd_dx_set_single_constant_buffer - Validate
2109  * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2110  *
2111  * @dev_priv: Pointer to a device private struct.
2112  * @sw_context: The software context being used for this batch.
2113  * @header: Pointer to the command header in the command stream.
2114  */
2115 static int
2116 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2117                                       struct vmw_sw_context *sw_context,
2118                                       SVGA3dCmdHeader *header)
2119 {
2120         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2121
2122         struct vmw_resource *res = NULL;
2123         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2124         struct vmw_ctx_bindinfo_cb binding;
2125         int ret;
2126
2127         if (!ctx_node)
2128                 return -EINVAL;
2129
2130         cmd = container_of(header, typeof(*cmd), header);
2131         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2132                                 VMW_RES_DIRTY_NONE, user_surface_converter,
2133                                 &cmd->body.sid, &res);
2134         if (unlikely(ret != 0))
2135                 return ret;
2136
2137         if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2138             cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2139                 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2140                                (unsigned int) cmd->body.type,
2141                                (unsigned int) cmd->body.slot);
2142                 return -EINVAL;
2143         }
2144
2145         binding.bi.ctx = ctx_node->ctx;
2146         binding.bi.res = res;
2147         binding.bi.bt = vmw_ctx_binding_cb;
2148         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2149         binding.offset = cmd->body.offsetInBytes;
2150         binding.size = cmd->body.sizeInBytes;
2151         binding.slot = cmd->body.slot;
2152
2153         vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2154                         binding.slot);
2155
2156         return 0;
2157 }
2158
2159 /**
2160  * vmw_cmd_dx_set_constant_buffer_offset - Validate
2161  * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2162  *
2163  * @dev_priv: Pointer to a device private struct.
2164  * @sw_context: The software context being used for this batch.
2165  * @header: Pointer to the command header in the command stream.
2166  */
2167 static int
2168 vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2169                                       struct vmw_sw_context *sw_context,
2170                                       SVGA3dCmdHeader *header)
2171 {
2172         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2173
2174         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2175         u32 shader_slot;
2176
2177         if (!has_sm5_context(dev_priv))
2178                 return -EINVAL;
2179
2180         if (!ctx_node)
2181                 return -EINVAL;
2182
2183         cmd = container_of(header, typeof(*cmd), header);
2184         if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2185                 VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2186                                (unsigned int) cmd->body.slot);
2187                 return -EINVAL;
2188         }
2189
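        /*
         * The per-shader-type command ids are contiguous, so the distance
         * from the VS variant gives the shader slot.
         */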
2190         shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2191         vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2192                                      cmd->body.slot, cmd->body.offsetInBytes);
2193
2194         return 0;
2195 }
2196
2197 /**
2198  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2199  * command
2200  *
2201  * @dev_priv: Pointer to a device private struct.
2202  * @sw_context: The software context being used for this batch.
2203  * @header: Pointer to the command header in the command stream.
2204  */
2205 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2206                                      struct vmw_sw_context *sw_context,
2207                                      SVGA3dCmdHeader *header)
2208 {
2209         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2210                 container_of(header, typeof(*cmd), header);
2211
2212         u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2213                 sizeof(SVGA3dShaderResourceViewId);
2214
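        /* Use 64-bit arithmetic so the range check below cannot overflow. */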
2215         if ((u64) cmd->body.startView + (u64) num_sr_view >
2216             (u64) SVGA3D_DX_MAX_SRVIEWS ||
2217             !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2218                 VMW_DEBUG_USER("Invalid shader binding.\n");
2219                 return -EINVAL;
2220         }
2221
2222         return vmw_view_bindings_add(sw_context, vmw_view_sr,
2223                                      vmw_ctx_binding_sr,
2224                                      cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2225                                      (void *) &cmd[1], num_sr_view,
2226                                      cmd->body.startView);
2227 }
2228
2229 /**
2230  * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2231  *
2232  * @dev_priv: Pointer to a device private struct.
2233  * @sw_context: The software context being used for this batch.
2234  * @header: Pointer to the command header in the command stream.
2235  */
2236 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2237                                  struct vmw_sw_context *sw_context,
2238                                  SVGA3dCmdHeader *header)
2239 {
2240         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2241         struct vmw_resource *res = NULL;
2242         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2243         struct vmw_ctx_bindinfo_shader binding;
2244         int ret = 0;
2245
2246         if (!ctx_node)
2247                 return -EINVAL;
2248
2249         cmd = container_of(header, typeof(*cmd), header);
2250
2251         if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2252                 VMW_DEBUG_USER("Illegal shader type %u.\n",
2253                                (unsigned int) cmd->body.type);
2254                 return -EINVAL;
2255         }
2256
2257         if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2258                 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2259                 if (IS_ERR(res)) {
2260                         VMW_DEBUG_USER("Could not find shader for binding.\n");
2261                         return PTR_ERR(res);
2262                 }
2263
2264                 ret = vmw_execbuf_res_val_add(sw_context, res,
2265                                               VMW_RES_DIRTY_NONE,
2266                                               vmw_val_add_flag_noctx);
2267                 if (ret)
2268                         return ret;
2269         }
2270
2271         binding.bi.ctx = ctx_node->ctx;
2272         binding.bi.res = res;
2273         binding.bi.bt = vmw_ctx_binding_dx_shader;
2274         binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2275
2276         vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2277
2278         return 0;
2279 }
2280
2281 /**
2282  * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2283  * command
2284  *
2285  * @dev_priv: Pointer to a device private struct.
2286  * @sw_context: The software context being used for this batch.
2287  * @header: Pointer to the command header in the command stream.
2288  */
2289 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2290                                          struct vmw_sw_context *sw_context,
2291                                          SVGA3dCmdHeader *header)
2292 {
2293         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2294         struct vmw_ctx_bindinfo_vb binding;
2295         struct vmw_resource *res;
2296         struct {
2297                 SVGA3dCmdHeader header;
2298                 SVGA3dCmdDXSetVertexBuffers body;
2299                 SVGA3dVertexBuffer buf[];
2300         } *cmd;
2301         int i, ret, num;
2302
2303         if (!ctx_node)
2304                 return -EINVAL;
2305
2306         cmd = container_of(header, typeof(*cmd), header);
2307         num = (cmd->header.size - sizeof(cmd->body)) /
2308                 sizeof(SVGA3dVertexBuffer);
2309         if ((u64)num + (u64)cmd->body.startBuffer >
2310             (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2311                 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2312                 return -EINVAL;
2313         }
2314
2315         for (i = 0; i < num; i++) {
2316                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317                                         VMW_RES_DIRTY_NONE,
2318                                         user_surface_converter,
2319                                         &cmd->buf[i].sid, &res);
2320                 if (unlikely(ret != 0))
2321                         return ret;
2322
2323                 binding.bi.ctx = ctx_node->ctx;
2324                 binding.bi.bt = vmw_ctx_binding_vb;
2325                 binding.bi.res = res;
2326                 binding.offset = cmd->buf[i].offset;
2327                 binding.stride = cmd->buf[i].stride;
2328                 binding.slot = i + cmd->body.startBuffer;
2329
2330                 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2331         }
2332
2333         return 0;
2334 }
2335
2336 /**
2337  * vmw_cmd_dx_set_index_buffer - Validate
2338  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2339  *
2340  * @dev_priv: Pointer to a device private struct.
2341  * @sw_context: The software context being used for this batch.
2342  * @header: Pointer to the command header in the command stream.
2343  */
2344 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2345                                        struct vmw_sw_context *sw_context,
2346                                        SVGA3dCmdHeader *header)
2347 {
2348         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2349         struct vmw_ctx_bindinfo_ib binding;
2350         struct vmw_resource *res;
2351         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2352         int ret;
2353
2354         if (!ctx_node)
2355                 return -EINVAL;
2356
2357         cmd = container_of(header, typeof(*cmd), header);
2358         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2359                                 VMW_RES_DIRTY_NONE, user_surface_converter,
2360                                 &cmd->body.sid, &res);
2361         if (unlikely(ret != 0))
2362                 return ret;
2363
2364         binding.bi.ctx = ctx_node->ctx;
2365         binding.bi.res = res;
2366         binding.bi.bt = vmw_ctx_binding_ib;
2367         binding.offset = cmd->body.offset;
2368         binding.format = cmd->body.format;
2369
2370         vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2371
2372         return 0;
2373 }
2374
2375 /**
2376  * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2377  * command
2378  *
2379  * @dev_priv: Pointer to a device private struct.
2380  * @sw_context: The software context being used for this batch.
2381  * @header: Pointer to the command header in the command stream.
2382  */
2383 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2384                                         struct vmw_sw_context *sw_context,
2385                                         SVGA3dCmdHeader *header)
2386 {
2387         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2388                 container_of(header, typeof(*cmd), header);
2389         u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2390                 sizeof(SVGA3dRenderTargetViewId);
2391         int ret;
2392
2393         if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2394                 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2395                 return -EINVAL;
2396         }
2397
2398         ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2399                                     0, &cmd->body.depthStencilViewId, 1, 0);
2400         if (ret)
2401                 return ret;
2402
2403         return vmw_view_bindings_add(sw_context, vmw_view_rt,
2404                                      vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2405                                      num_rt_view, 0);
2406 }
2407
2408 /**
2409  * vmw_cmd_dx_clear_rendertarget_view - Validate
2410  * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2411  *
2412  * @dev_priv: Pointer to a device private struct.
2413  * @sw_context: The software context being used for this batch.
2414  * @header: Pointer to the command header in the command stream.
2415  */
2416 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2417                                               struct vmw_sw_context *sw_context,
2418                                               SVGA3dCmdHeader *header)
2419 {
2420         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2421                 container_of(header, typeof(*cmd), header);
2422         struct vmw_resource *ret;
2423
2424         ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2425                                   cmd->body.renderTargetViewId);
2426
2427         return PTR_ERR_OR_ZERO(ret);
2428 }
2429
2430 /**
2431  * vmw_cmd_dx_clear_depthstencil_view - Validate
2432  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2433  *
2434  * @dev_priv: Pointer to a device private struct.
2435  * @sw_context: The software context being used for this batch.
2436  * @header: Pointer to the command header in the command stream.
2437  */
2438 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2439                                               struct vmw_sw_context *sw_context,
2440                                               SVGA3dCmdHeader *header)
2441 {
2442         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2443                 container_of(header, typeof(*cmd), header);
2444         struct vmw_resource *ret;
2445
2446         ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2447                                   cmd->body.depthStencilViewId);
2448
2449         return PTR_ERR_OR_ZERO(ret);
2450 }
2451
2452 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2453                                   struct vmw_sw_context *sw_context,
2454                                   SVGA3dCmdHeader *header)
2455 {
2456         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2457         struct vmw_resource *srf;
2458         struct vmw_resource *res;
2459         enum vmw_view_type view_type;
2460         int ret;
2461         /*
2462          * This is based on the fact that all affected define commands have the
2463          * same initial command body layout.
2464          */
2465         struct {
2466                 SVGA3dCmdHeader header;
2467                 uint32 defined_id;
2468                 uint32 sid;
2469         } *cmd;
2470
2471         if (!ctx_node)
2472                 return -EINVAL;
2473
2474         view_type = vmw_view_cmd_to_type(header->id);
2475         if (view_type == vmw_view_max)
2476                 return -EINVAL;
2477
2478         cmd = container_of(header, typeof(*cmd), header);
2479         if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2480                 VMW_DEBUG_USER("Invalid surface id.\n");
2481                 return -EINVAL;
2482         }
2483         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2484                                 VMW_RES_DIRTY_NONE, user_surface_converter,
2485                                 &cmd->sid, &srf);
2486         if (unlikely(ret != 0))
2487                 return ret;
2488
2489         res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2490         if (IS_ERR_OR_NULL(res))
2491                 return res ? PTR_ERR(res) : -EINVAL;
2492         ret = vmw_cotable_notify(res, cmd->defined_id);
2493         if (unlikely(ret != 0))
2494                 return ret;
2495
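        /*
         * Note that vmw_view_add() keeps a copy of the complete command,
         * which is why the size passed below includes the header.
         */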
2496         return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2497                             cmd->defined_id, header,
2498                             header->size + sizeof(*header),
2499                             &sw_context->staged_cmd_res);
2500 }
2501
2502 /**
2503  * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2504  *
2505  * @dev_priv: Pointer to a device private struct.
2506  * @sw_context: The software context being used for this batch.
2507  * @header: Pointer to the command header in the command stream.
2508  */
2509 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2510                                      struct vmw_sw_context *sw_context,
2511                                      SVGA3dCmdHeader *header)
2512 {
2513         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2514         struct vmw_ctx_bindinfo_so_target binding;
2515         struct vmw_resource *res;
2516         struct {
2517                 SVGA3dCmdHeader header;
2518                 SVGA3dCmdDXSetSOTargets body;
2519                 SVGA3dSoTarget targets[];
2520         } *cmd;
2521         int i, ret, num;
2522
2523         if (!ctx_node)
2524                 return -EINVAL;
2525
2526         cmd = container_of(header, typeof(*cmd), header);
2527         num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2528
2529         if (num > SVGA3D_DX_MAX_SOTARGETS) {
2530                 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2531                 return -EINVAL;
2532         }
2533
2534         for (i = 0; i < num; i++) {
2535                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2536                                         VMW_RES_DIRTY_SET,
2537                                         user_surface_converter,
2538                                         &cmd->targets[i].sid, &res);
2539                 if (unlikely(ret != 0))
2540                         return ret;
2541
2542                 binding.bi.ctx = ctx_node->ctx;
2543                 binding.bi.res = res;
2544                 binding.bi.bt = vmw_ctx_binding_so_target;
2545                 binding.offset = cmd->targets[i].offset;
2546                 binding.size = cmd->targets[i].sizeInBytes;
2547                 binding.slot = i;
2548
2549                 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2550         }
2551
2552         return 0;
2553 }
2554
2555 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2556                                 struct vmw_sw_context *sw_context,
2557                                 SVGA3dCmdHeader *header)
2558 {
2559         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2560         struct vmw_resource *res;
2561         /*
2562          * This is based on the fact that all affected define commands have
2563          * the same initial command body layout.
2564          */
2565         struct {
2566                 SVGA3dCmdHeader header;
2567                 uint32 defined_id;
2568         } *cmd;
2569         enum vmw_so_type so_type;
2570         int ret;
2571
2572         if (!ctx_node)
2573                 return -EINVAL;
2574
2575         so_type = vmw_so_cmd_to_type(header->id);
2576         res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2577         if (IS_ERR_OR_NULL(res))
2578                 return res ? PTR_ERR(res) : -EINVAL;
2579         cmd = container_of(header, typeof(*cmd), header);
2580         ret = vmw_cotable_notify(res, cmd->defined_id);
2581
2582         return ret;
2583 }
2584
2585 /**
2586  * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2587  * command
2588  *
2589  * @dev_priv: Pointer to a device private struct.
2590  * @sw_context: The software context being used for this batch.
2591  * @header: Pointer to the command header in the command stream.
2592  */
2593 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2594                                         struct vmw_sw_context *sw_context,
2595                                         SVGA3dCmdHeader *header)
2596 {
2597         struct {
2598                 SVGA3dCmdHeader header;
2599                 union {
2600                         SVGA3dCmdDXReadbackSubResource r_body;
2601                         SVGA3dCmdDXInvalidateSubResource i_body;
2602                         SVGA3dCmdDXUpdateSubResource u_body;
2603                         SVGA3dSurfaceId sid;
2604                 };
2605         } *cmd;
2606
2607         BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2608                      offsetof(typeof(*cmd), sid));
2609         BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2610                      offsetof(typeof(*cmd), sid));
2611         BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2612                      offsetof(typeof(*cmd), sid));
2613
2614         cmd = container_of(header, typeof(*cmd), header);
2615         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2616                                  VMW_RES_DIRTY_NONE, user_surface_converter,
2617                                  &cmd->sid, NULL);
2618 }
2619
2620 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2621                                 struct vmw_sw_context *sw_context,
2622                                 SVGA3dCmdHeader *header)
2623 {
2624         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2625
2626         if (!ctx_node)
2627                 return -EINVAL;
2628
2629         return 0;
2630 }
2631
2632 /**
2633  * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2634  * resource for removal.
2635  *
2636  * @dev_priv: Pointer to a device private struct.
2637  * @sw_context: The software context being used for this batch.
2638  * @header: Pointer to the command header in the command stream.
2639  *
2640  * Check that the view exists, and if it was not created using this command
2641  * batch, conditionally make this command a NOP.
2642  */
2643 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2644                                   struct vmw_sw_context *sw_context,
2645                                   SVGA3dCmdHeader *header)
2646 {
2647         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2648         struct {
2649                 SVGA3dCmdHeader header;
2650                 union vmw_view_destroy body;
2651         } *cmd = container_of(header, typeof(*cmd), header);
2652         enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2653         struct vmw_resource *view;
2654         int ret;
2655
2656         if (!ctx_node)
2657                 return -EINVAL;
2658
2659         ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2660                               &sw_context->staged_cmd_res, &view);
2661         if (ret || !view)
2662                 return ret;
2663
2664         /*
2665          * If the view wasn't created during this command batch, it might
2666          * have been removed due to a context swapout, so add a
2667          * relocation to conditionally make this command a NOP to avoid
2668          * device errors.
2669          */
2670         return vmw_resource_relocation_add(sw_context, view,
2671                                            vmw_ptr_diff(sw_context->buf_start,
2672                                                         &cmd->header.id),
2673                                            vmw_res_rel_cond_nop);
2674 }
2675
2676 /**
2677  * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2678  *
2679  * @dev_priv: Pointer to a device private struct.
2680  * @sw_context: The software context being used for this batch.
2681  * @header: Pointer to the command header in the command stream.
2682  */
2683 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2684                                     struct vmw_sw_context *sw_context,
2685                                     SVGA3dCmdHeader *header)
2686 {
2687         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2688         struct vmw_resource *res;
2689         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2690                 container_of(header, typeof(*cmd), header);
2691         int ret;
2692
2693         if (!ctx_node)
2694                 return -EINVAL;
2695
2696         res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2697         if (IS_ERR_OR_NULL(res))
2698                 return res ? PTR_ERR(res) : -EINVAL;
2699         ret = vmw_cotable_notify(res, cmd->body.shaderId);
2700         if (ret)
2701                 return ret;
2702
2703         return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2704                                  cmd->body.shaderId, cmd->body.type,
2705                                  &sw_context->staged_cmd_res);
2706 }
2707
2708 /**
2709  * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2710  *
2711  * @dev_priv: Pointer to a device private struct.
2712  * @sw_context: The software context being used for this batch.
2713  * @header: Pointer to the command header in the command stream.
2714  */
2715 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2716                                      struct vmw_sw_context *sw_context,
2717                                      SVGA3dCmdHeader *header)
2718 {
2719         struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2720         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2721                 container_of(header, typeof(*cmd), header);
2722         int ret;
2723
2724         if (!ctx_node)
2725                 return -EINVAL;
2726
2727         ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2728                                 &sw_context->staged_cmd_res);
2729
2730         return ret;
2731 }
2732
2733 /**
2734  * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2735  *
2736  * @dev_priv: Pointer to a device private struct.
2737  * @sw_context: The software context being used for this batch.
2738  * @header: Pointer to the command header in the command stream.
2739  */
2740 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2741                                   struct vmw_sw_context *sw_context,
2742                                   SVGA3dCmdHeader *header)
2743 {
2744         struct vmw_resource *ctx;
2745         struct vmw_resource *res;
2746         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2747                 container_of(header, typeof(*cmd), header);
2748         int ret;
2749
2750         if (cmd->body.cid != SVGA3D_INVALID_ID) {
2751                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2752                                         VMW_RES_DIRTY_SET,
2753                                         user_context_converter, &cmd->body.cid,
2754                                         &ctx);
2755                 if (ret)
2756                         return ret;
2757         } else {
2758                 struct vmw_ctx_validation_info *ctx_node =
2759                         VMW_GET_CTX_NODE(sw_context);
2760
2761                 if (!ctx_node)
2762                         return -EINVAL;
2763
2764                 ctx = ctx_node->ctx;
2765         }
2766
2767         res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2768         if (IS_ERR(res)) {
2769                 VMW_DEBUG_USER("Could not find shader to bind.\n");
2770                 return PTR_ERR(res);
2771         }
2772
2773         ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2774                                       vmw_val_add_flag_noctx);
2775         if (ret) {
2776                 VMW_DEBUG_USER("Error creating resource validation node.\n");
2777                 return ret;
2778         }
2779
2780         return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2781                                          &cmd->body.mobid,
2782                                          cmd->body.offsetInBytes);
2783 }
2784
2785 /**
2786  * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2787  *
2788  * @dev_priv: Pointer to a device private struct.
2789  * @sw_context: The software context being used for this batch.
2790  * @header: Pointer to the command header in the command stream.
2791  */
2792 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2793                               struct vmw_sw_context *sw_context,
2794                               SVGA3dCmdHeader *header)
2795 {
2796         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2797                 container_of(header, typeof(*cmd), header);
2798         struct vmw_resource *view;
2799         struct vmw_res_cache_entry *rcache;
2800
2801         view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2802                                    cmd->body.shaderResourceViewId);
2803         if (IS_ERR(view))
2804                 return PTR_ERR(view);
2805
2806         /*
2807          * Normally the shader-resource view is not gpu-dirtying, but for
2808          * this particular command it is.
2809          * So mark the last looked-up surface, which is the surface
2810          * the view points to, as gpu-dirty.
2811          */
2812         rcache = &sw_context->res_cache[vmw_res_surface];
2813         vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2814                                      VMW_RES_DIRTY_SET);
2815         return 0;
2816 }
2817
2818 /**
2819  * vmw_cmd_dx_transfer_from_buffer - Validate
2820  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2821  *
2822  * @dev_priv: Pointer to a device private struct.
2823  * @sw_context: The software context being used for this batch.
2824  * @header: Pointer to the command header in the command stream.
2825  */
2826 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2827                                            struct vmw_sw_context *sw_context,
2828                                            SVGA3dCmdHeader *header)
2829 {
2830         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2831                 container_of(header, typeof(*cmd), header);
2832         int ret;
2833
2834         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2835                                 VMW_RES_DIRTY_NONE, user_surface_converter,
2836                                 &cmd->body.srcSid, NULL);
2837         if (ret != 0)
2838                 return ret;
2839
2840         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2841                                  VMW_RES_DIRTY_SET, user_surface_converter,
2842                                  &cmd->body.destSid, NULL);
2843 }
2844
2845 /**
2846  * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2847  *
2848  * @dev_priv: Pointer to a device private struct.
2849  * @sw_context: The software context being used for this batch.
2850  * @header: Pointer to the command header in the command stream.
2851  */
2852 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2853                                       struct vmw_sw_context *sw_context,
2854                                       SVGA3dCmdHeader *header)
2855 {
2856         VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2857                 container_of(header, typeof(*cmd), header);
2858
2859         if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2860                 return -EINVAL;
2861
2862         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2863                                  VMW_RES_DIRTY_SET, user_surface_converter,
2864                                  &cmd->body.surface.sid, NULL);
2865 }
2866
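/**
 * vmw_cmd_sm5 - Validate a command that only requires SM5 device support
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */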
2867 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2868                        struct vmw_sw_context *sw_context,
2869                        SVGA3dCmdHeader *header)
2870 {
2871         if (!has_sm5_context(dev_priv))
2872                 return -EINVAL;
2873
2874         return 0;
2875 }
2876
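/**
 * vmw_cmd_sm5_view_define - Validate an SM5 view define command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks for SM5 support and then defers to vmw_cmd_dx_view_define().
 */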
2877 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2878                                    struct vmw_sw_context *sw_context,
2879                                    SVGA3dCmdHeader *header)
2880 {
2881         if (!has_sm5_context(dev_priv))
2882                 return -EINVAL;
2883
2884         return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2885 }
2886
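/**
 * vmw_cmd_sm5_view_remove - Validate an SM5 view remove command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks for SM5 support and then defers to vmw_cmd_dx_view_remove().
 */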
2887 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2888                                    struct vmw_sw_context *sw_context,
2889                                    SVGA3dCmdHeader *header)
2890 {
2891         if (!has_sm5_context(dev_priv))
2892                 return -EINVAL;
2893
2894         return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2895 }
2896
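/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */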
2897 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2898                                   struct vmw_sw_context *sw_context,
2899                                   SVGA3dCmdHeader *header)
2900 {
2901         struct {
2902                 SVGA3dCmdHeader header;
2903                 SVGA3dCmdDXClearUAViewUint body;
2904         } *cmd = container_of(header, typeof(*cmd), header);
2905         struct vmw_resource *ret;
2906
2907         if (!has_sm5_context(dev_priv))
2908                 return -EINVAL;
2909
2910         ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2911                                   cmd->body.uaViewId);
2912
2913         return PTR_ERR_OR_ZERO(ret);
2914 }
2915
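/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */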
2916 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2917                                    struct vmw_sw_context *sw_context,
2918                                    SVGA3dCmdHeader *header)
2919 {
2920         struct {
2921                 SVGA3dCmdHeader header;
2922                 SVGA3dCmdDXClearUAViewFloat body;
2923         } *cmd = container_of(header, typeof(*cmd), header);
2924         struct vmw_resource *ret;
2925
2926         if (!has_sm5_context(dev_priv))
2927                 return -EINVAL;
2928
2929         ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2930                                   cmd->body.uaViewId);
2931
2932         return PTR_ERR_OR_ZERO(ret);
2933 }
2934
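/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */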
2935 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2936                            struct vmw_sw_context *sw_context,
2937                            SVGA3dCmdHeader *header)
2938 {
2939         struct {
2940                 SVGA3dCmdHeader header;
2941                 SVGA3dCmdDXSetUAViews body;
2942         } *cmd = container_of(header, typeof(*cmd), header);
2943         u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2944                 sizeof(SVGA3dUAViewId);
2945         int ret;
2946
2947         if (!has_sm5_context(dev_priv))
2948                 return -EINVAL;
2949
2950         if (num_uav > vmw_max_num_uavs(dev_priv)) {
2951                 VMW_DEBUG_USER("Invalid UAV binding.\n");
2952                 return -EINVAL;
2953         }
2954
2955         ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2956                                     vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2957                                     num_uav, 0);
2958         if (ret)
2959                 return ret;
2960
2961         vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2962                                   cmd->body.uavSpliceIndex);
2963
2964         return ret;
2965 }
2966
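/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */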
2967 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2968                               struct vmw_sw_context *sw_context,
2969                               SVGA3dCmdHeader *header)
2970 {
2971         struct {
2972                 SVGA3dCmdHeader header;
2973                 SVGA3dCmdDXSetCSUAViews body;
2974         } *cmd = container_of(header, typeof(*cmd), header);
2975         u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2976                 sizeof(SVGA3dUAViewId);
2977         int ret;
2978
2979         if (!has_sm5_context(dev_priv))
2980                 return -EINVAL;
2981
2982         if (num_uav > vmw_max_num_uavs(dev_priv)) {
2983                 VMW_DEBUG_USER("Invalid UAV binding.\n");
2984                 return -EINVAL;
2985         }
2986
2987         ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2988                                     vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2989                                     num_uav, 0);
2990         if (ret)
2991                 return ret;
2992
2993         vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2994                                   cmd->body.startIndex);
2995
2996         return ret;
2997 }
2998
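/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Notifies the streamoutput COTable and stages the new streamoutput
 * resource on the command submission manager.
 */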
2999 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
3000                                           struct vmw_sw_context *sw_context,
3001                                           SVGA3dCmdHeader *header)
3002 {
3003         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3004         struct vmw_resource *res;
3005         struct {
3006                 SVGA3dCmdHeader header;
3007                 SVGA3dCmdDXDefineStreamOutputWithMob body;
3008         } *cmd = container_of(header, typeof(*cmd), header);
3009         int ret;
3010
3011         if (!has_sm5_context(dev_priv))
3012                 return -EINVAL;
3013
3014         if (!ctx_node) {
3015                 DRM_ERROR("DX Context not set.\n");
3016                 return -EINVAL;
3017         }
3018
3019         res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3020         if (IS_ERR_OR_NULL(res))
3021                 return res ? PTR_ERR(res) : -EINVAL;
3022         ret = vmw_cotable_notify(res, cmd->body.soid);
3023         if (ret)
3024                 return ret;
3025
3026         return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3027                                        cmd->body.soid,
3028                                        &sw_context->staged_cmd_res);
3029 }
3030
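/**
 * vmw_cmd_dx_destroy_streamoutput - Validate
 * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */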
3031 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3032                                            struct vmw_sw_context *sw_context,
3033                                            SVGA3dCmdHeader *header)
3034 {
3035         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3036         struct vmw_resource *res;
3037         struct {
3038                 SVGA3dCmdHeader header;
3039                 SVGA3dCmdDXDestroyStreamOutput body;
3040         } *cmd = container_of(header, typeof(*cmd), header);
3041
3042         if (!ctx_node) {
3043                 DRM_ERROR("DX Context not set.\n");
3044                 return -EINVAL;
3045         }
3046
3047         /*
3048          * When the device does not support SM5, the streamoutput-with-mob
3049          * commands are not available to user-space. Simply return in this case.
3050          */
3051         if (!has_sm5_context(dev_priv))
3052                 return 0;
3053
3054         /*
3055          * On an SM5-capable device, a failed lookup means user-space probably
3056          * used the old streamoutput define command. Return without an error.
3057          */
3058         res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3059                                          cmd->body.soid);
3060         if (IS_ERR(res))
3061                 return 0;
3062
3063         return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3064                                           &sw_context->staged_cmd_res);
3065 }
3066
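/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Looks up the streamoutput resource and switches its backing mob.
 */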
3067 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3068                                         struct vmw_sw_context *sw_context,
3069                                         SVGA3dCmdHeader *header)
3070 {
3071         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3072         struct vmw_resource *res;
3073         struct {
3074                 SVGA3dCmdHeader header;
3075                 SVGA3dCmdDXBindStreamOutput body;
3076         } *cmd = container_of(header, typeof(*cmd), header);
3077         int ret;
3078
3079         if (!has_sm5_context(dev_priv))
3080                 return -EINVAL;
3081
3082         if (!ctx_node) {
3083                 DRM_ERROR("DX Context not set.\n");
3084                 return -EINVAL;
3085         }
3086
3087         res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3088                                          cmd->body.soid);
3089         if (IS_ERR(res)) {
3090                 DRM_ERROR("Could not find streamoutput to bind.\n");
3091                 return PTR_ERR(res);
3092         }
3093
3094         vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3095
3096         ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3097                                       vmw_val_add_flag_noctx);
3098         if (ret) {
3099                 DRM_ERROR("Error creating resource validation node.\n");
3100                 return ret;
3101         }
3102
3103         return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3104                                          &cmd->body.mobid,
3105                                          cmd->body.offsetInBytes);
3106 }
3107
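/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */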
3108 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3109                                        struct vmw_sw_context *sw_context,
3110                                        SVGA3dCmdHeader *header)
3111 {
3112         struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3113         struct vmw_resource *res;
3114         struct vmw_ctx_bindinfo_so binding;
3115         struct {
3116                 SVGA3dCmdHeader header;
3117                 SVGA3dCmdDXSetStreamOutput body;
3118         } *cmd = container_of(header, typeof(*cmd), header);
3119         int ret;
3120
3121         if (!ctx_node) {
3122                 DRM_ERROR("DX Context not set.\n");
3123                 return -EINVAL;
3124         }
3125
3126         if (cmd->body.soid == SVGA3D_INVALID_ID)
3127                 return 0;
3128
3129         /*
3130          * When the device does not support SM5, the streamoutput-with-mob
3131          * commands are not available to user-space. Simply return in this case.
3132          */
3133         if (!has_sm5_context(dev_priv))
3134                 return 0;
3135
3136         /*
3137          * On an SM5-capable device, a failed lookup means user-space probably
3138          * used the old streamoutput define command. Return without an error.
3139          */
3140         res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3141                                          cmd->body.soid);
3142         if (IS_ERR(res))
3143                 return 0;
3145
3146         ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3147                                       vmw_val_add_flag_noctx);
3148         if (ret) {
3149                 DRM_ERROR("Error creating resource validation node.\n");
3150                 return ret;
3151         }
3152
3153         binding.bi.ctx = ctx_node->ctx;
3154         binding.bi.res = res;
3155         binding.bi.bt = vmw_ctx_binding_so;
3156         binding.slot = 0; /* Only one SO can be set on a context at a time. */
3157
3158         vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3159                         binding.slot);
3160
3161         return ret;
3162 }
3163
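/**
 * vmw_cmd_indexed_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Besides requiring SM5 support, validates the indirect argument buffer.
 */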
3164 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3165                                               struct vmw_sw_context *sw_context,
3166                                               SVGA3dCmdHeader *header)
3167 {
3168         struct vmw_draw_indexed_instanced_indirect_cmd {
3169                 SVGA3dCmdHeader header;
3170                 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3171         } *cmd = container_of(header, typeof(*cmd), header);
3172
3173         if (!has_sm5_context(dev_priv))
3174                 return -EINVAL;
3175
3176         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3177                                  VMW_RES_DIRTY_NONE, user_surface_converter,
3178                                  &cmd->body.argsBufferSid, NULL);
3179 }
3180
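/**
 * vmw_cmd_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Besides requiring SM5 support, validates the indirect argument buffer.
 */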
3181 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3182                                       struct vmw_sw_context *sw_context,
3183                                       SVGA3dCmdHeader *header)
3184 {
3185         struct vmw_draw_instanced_indirect_cmd {
3186                 SVGA3dCmdHeader header;
3187                 SVGA3dCmdDXDrawInstancedIndirect body;
3188         } *cmd = container_of(header, typeof(*cmd), header);
3189
3190         if (!has_sm5_context(dev_priv))
3191                 return -EINVAL;
3192
3193         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3194                                  VMW_RES_DIRTY_NONE, user_surface_converter,
3195                                  &cmd->body.argsBufferSid, NULL);
3196 }
3197
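/**
 * vmw_cmd_dispatch_indirect - Validate SVGA_3D_CMD_DX_DISPATCH_INDIRECT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Besides requiring SM5 support, validates the indirect argument buffer.
 */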
3198 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3199                                      struct vmw_sw_context *sw_context,
3200                                      SVGA3dCmdHeader *header)
3201 {
3202         struct vmw_dispatch_indirect_cmd {
3203                 SVGA3dCmdHeader header;
3204                 SVGA3dCmdDXDispatchIndirect body;
3205         } *cmd = container_of(header, typeof(*cmd), header);
3206
3207         if (!has_sm5_context(dev_priv))
3208                 return -EINVAL;
3209
3210         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3211                                  VMW_RES_DIRTY_NONE, user_surface_converter,
3212                                  &cmd->body.argsBufferSid, NULL);
3213 }
3214
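/**
 * vmw_cmd_check_not_3d - Validate a non-3D SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful output, the size of the command.
 *
 * Note that these commands are allowed from kernel clients only.
 */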
3215 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3216                                 struct vmw_sw_context *sw_context,
3217                                 void *buf, uint32_t *size)
3218 {
3219         uint32_t size_remaining = *size;
3220         uint32_t cmd_id;
3221
3222         cmd_id = ((uint32_t *)buf)[0];
3223         switch (cmd_id) {
3224         case SVGA_CMD_UPDATE:
3225                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3226                 break;
3227         case SVGA_CMD_DEFINE_GMRFB:
3228                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3229                 break;
3230         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3231                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3232                 break;
3233         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3234                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3235                 break;
3236         default:
3237                 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3238                 return -EINVAL;
3239         }
3240
3241         if (*size > size_remaining) {
3242                 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3243                                cmd_id);
3244                 return -EINVAL;
3245         }
3246
3247         if (unlikely(!sw_context->kernel)) {
3248                 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3249                 return -EPERM;
3250         }
3251
3252         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3253                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3254
3255         return 0;
3256 }
3257
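/*
 * Per-command verifier table, indexed by command id relative to
 * SVGA_3D_CMD_BASE. The three booleans in each VMW_CMD_DEF entry are, in
 * order, user_allow, gb_disable and gb_enable: whether user-space may
 * submit the command, whether it is disallowed when the device has
 * guest-backed objects, and whether it requires guest-backed objects,
 * respectively.
 */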
3258 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3259         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3260                     false, false, false),
3261         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3262                     false, false, false),
3263         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3264                     true, false, false),
3265         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3266                     true, false, false),
3267         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3268                     true, false, false),
3269         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3270                     false, false, false),
3271         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3272                     false, false, false),
3273         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3274                     true, false, false),
3275         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3276                     true, false, false),
3277         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3278                     true, false, false),
3279         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3280                     &vmw_cmd_set_render_target_check, true, false, false),
3281         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3282                     true, false, false),
3283         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3284                     true, false, false),
3285         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3286                     true, false, false),
3287         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3288                     true, false, false),
3289         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3290                     true, false, false),
3291         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3292                     true, false, false),
3293         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3294                     true, false, false),
3295         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3296                     false, false, false),
3297         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3298                     true, false, false),
3299         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3300                     true, false, false),
3301         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3302                     true, false, false),
3303         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3304                     true, false, false),
3305         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3306                     true, false, false),
3307         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3308                     true, false, false),
3309         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3310                     true, false, false),
3311         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3312                     true, false, false),
3313         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3314                     true, false, false),
3315         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3316                     true, false, false),
3317         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3318                     &vmw_cmd_blt_surf_screen_check, false, false, false),
3319         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3320                     false, false, false),
3321         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3322                     false, false, false),
3323         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3324                     false, false, false),
3325         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3326                     false, false, false),
3327         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3328                     false, false, false),
3329         VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3330                     false, false, false),
3331         VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3332                     false, false, false),
3333         VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3334         VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3335         VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3336         VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3337         VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3338         VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3339         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3340                     false, false, true),
3341         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3342                     false, false, true),
3343         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3344                     false, false, true),
3345         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3346                     false, false, true),
3347         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3348                     false, false, true),
3349         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3350                     false, false, true),
3351         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3352                     false, false, true),
3353         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3354                     false, false, true),
3355         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3356                     true, false, true),
3357         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3358                     false, false, true),
3359         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3360                     true, false, true),
3361         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3362                     &vmw_cmd_update_gb_surface, true, false, true),
3363         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3364                     &vmw_cmd_readback_gb_image, true, false, true),
3365         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3366                     &vmw_cmd_readback_gb_surface, true, false, true),
3367         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3368                     &vmw_cmd_invalidate_gb_image, true, false, true),
3369         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3370                     &vmw_cmd_invalidate_gb_surface, true, false, true),
3371         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3372                     false, false, true),
3373         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3374                     false, false, true),
3375         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3376                     false, false, true),
3377         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3378                     false, false, true),
3379         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3380                     false, false, true),
3381         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3382                     false, false, true),
3383         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3384                     true, false, true),
3385         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3386                     false, false, true),
3387         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3388                     false, false, false),
3389         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3390                     true, false, true),
3391         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3392                     true, false, true),
3393         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3394                     true, false, true),
3395         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3396                     true, false, true),
3397         VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3398                     true, false, true),
3399         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3400                     false, false, true),
3401         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3402                     false, false, true),
3403         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3404                     false, false, true),
3405         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3406                     false, false, true),
3407         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3408                     false, false, true),
3409         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3410                     false, false, true),
3411         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3412                     false, false, true),
3413         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3414                     false, false, true),
3415         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3416                     false, false, true),
3417         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3418                     false, false, true),
3419         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3420                     true, false, true),
3421         VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3422                     false, false, true),
3423         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3424                     false, false, true),
3425         VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3426                     false, false, true),
3427         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3428                     false, false, true),
3429
3430         /* SM commands */
3431         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3432                     false, false, true),
3433         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3434                     false, false, true),
3435         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3436                     false, false, true),
3437         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3438                     false, false, true),
3439         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3440                     false, false, true),
3441         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3442                     &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3443         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3444                     &vmw_cmd_dx_set_shader_res, true, false, true),
3445         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3446                     true, false, true),
3447         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3448                     true, false, true),
3449         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3450                     true, false, true),
3451         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3452                     true, false, true),
3453         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3454                     true, false, true),
3455         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3456                     &vmw_cmd_dx_cid_check, true, false, true),
3457         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3458                     true, false, true),
3459         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3460                     &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3461         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3462                     &vmw_cmd_dx_set_index_buffer, true, false, true),
3463         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3464                     &vmw_cmd_dx_set_rendertargets, true, false, true),
3465         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3466                     true, false, true),
3467         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3468                     &vmw_cmd_dx_cid_check, true, false, true),
3469         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3470                     &vmw_cmd_dx_cid_check, true, false, true),
3471         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3472                     true, false, true),
3473         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3474                     true, false, true),
3475         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3476                     true, false, true),
3477         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3478                     &vmw_cmd_dx_cid_check, true, false, true),
3479         VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3480                     true, false, true),
3481         VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3482                     true, false, true),
3483         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3484                     true, false, true),
3485         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3486                     true, false, true),
3487         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3488                     true, false, true),
3489         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3490                     true, false, true),
3491         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3492                     &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3493         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3494                     &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3495         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3496                     true, false, true),
3497         VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3498                     true, false, true),
3499         VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3500                     &vmw_cmd_dx_check_subresource, true, false, true),
3501         VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3502                     &vmw_cmd_dx_check_subresource, true, false, true),
3503         VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3504                     &vmw_cmd_dx_check_subresource, true, false, true),
3505         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3506                     &vmw_cmd_dx_view_define, true, false, true),
3507         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3508                     &vmw_cmd_dx_view_remove, true, false, true),
3509         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3510                     &vmw_cmd_dx_view_define, true, false, true),
3511         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3512                     &vmw_cmd_dx_view_remove, true, false, true),
3513         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3514                     &vmw_cmd_dx_view_define, true, false, true),
3515         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3516                     &vmw_cmd_dx_view_remove, true, false, true),
3517         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3518                     &vmw_cmd_dx_so_define, true, false, true),
3519         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3520                     &vmw_cmd_dx_cid_check, true, false, true),
3521         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3522                     &vmw_cmd_dx_so_define, true, false, true),
3523         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3524                     &vmw_cmd_dx_cid_check, true, false, true),
3525         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3526                     &vmw_cmd_dx_so_define, true, false, true),
3527         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3528                     &vmw_cmd_dx_cid_check, true, false, true),
3529         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3530                     &vmw_cmd_dx_so_define, true, false, true),
3531         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3532                     &vmw_cmd_dx_cid_check, true, false, true),
3533         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3534                     &vmw_cmd_dx_so_define, true, false, true),
3535         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3536                     &vmw_cmd_dx_cid_check, true, false, true),
3537         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3538                     &vmw_cmd_dx_define_shader, true, false, true),
3539         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3540                     &vmw_cmd_dx_destroy_shader, true, false, true),
3541         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3542                     &vmw_cmd_dx_bind_shader, true, false, true),
3543         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3544                     &vmw_cmd_dx_so_define, true, false, true),
3545         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3546                     &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3547         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3548                     &vmw_cmd_dx_set_streamoutput, true, false, true),
3549         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3550                     &vmw_cmd_dx_set_so_targets, true, false, true),
3551         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3552                     &vmw_cmd_dx_cid_check, true, false, true),
3553         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3554                     &vmw_cmd_dx_cid_check, true, false, true),
3555         VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3556                     &vmw_cmd_buffer_copy_check, true, false, true),
3557         VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3558                     &vmw_cmd_pred_copy_check, true, false, true),
3559         VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3560                     &vmw_cmd_dx_transfer_from_buffer,
3561                     true, false, true),
3562         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3563                     &vmw_cmd_dx_set_constant_buffer_offset,
3564                     true, false, true),
3565         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3566                     &vmw_cmd_dx_set_constant_buffer_offset,
3567                     true, false, true),
3568         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3569                     &vmw_cmd_dx_set_constant_buffer_offset,
3570                     true, false, true),
3571         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3572                     &vmw_cmd_dx_set_constant_buffer_offset,
3573                     true, false, true),
3574         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3575                     &vmw_cmd_dx_set_constant_buffer_offset,
3576                     true, false, true),
3577         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3578                     &vmw_cmd_dx_set_constant_buffer_offset,
3579                     true, false, true),
3580         VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3581                     true, false, true),
3582
3583         /*
3584          * SM5 commands
3585          */
3586         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3587                     true, false, true),
3588         VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3589                     true, false, true),
3590         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3591                     true, false, true),
3592         VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3593                     &vmw_cmd_clear_uav_float, true, false, true),
3594         VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3595                     false, true),
3596         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3597                     true),
3598         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3599                     &vmw_cmd_indexed_instanced_indirect, true, false, true),
3600         VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3601                     &vmw_cmd_instanced_indirect, true, false, true),
3602         VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3603         VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3604                     &vmw_cmd_dispatch_indirect, true, false, true),
3605         VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3606                     false, true),
3607         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3608                     &vmw_cmd_sm5_view_define, true, false, true),
3609         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3610                     &vmw_cmd_dx_define_streamoutput, true, false, true),
3611         VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3612                     &vmw_cmd_dx_bind_streamoutput, true, false, true),
3613         VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3614                     &vmw_cmd_dx_so_define, true, false, true),
3615         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
3616                     &vmw_cmd_invalid, false, false, true),
3617 };
3618
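/**
 * vmw_cmd_describe - Look up the name and size of an SVGA command
 *
 * @buf: Pointer to the command.
 * @size: Outputs the size of the command in bytes.
 * @cmd: Outputs the human-readable name of the command.
 *
 * Returns true if the command was recognized, false otherwise.
 */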
3619 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3620 {
3621         u32 cmd_id = ((u32 *) buf)[0];
3622
3623         if (cmd_id >= SVGA_CMD_MAX) {
3624                 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3625                 const struct vmw_cmd_entry *entry;
3626
3627                 *size = header->size + sizeof(SVGA3dCmdHeader);
3628                 cmd_id = header->id;
3629                 if (cmd_id >= SVGA_3D_CMD_MAX)
3630                         return false;
3631
3632                 cmd_id -= SVGA_3D_CMD_BASE;
3633                 entry = &vmw_cmd_entries[cmd_id];
3634                 *cmd = entry->cmd_name;
3635                 return true;
3636         }
3637
3638         switch (cmd_id) {
3639         case SVGA_CMD_UPDATE:
3640                 *cmd = "SVGA_CMD_UPDATE";
3641                 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3642                 break;
3643         case SVGA_CMD_DEFINE_GMRFB:
3644                 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3645                 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3646                 break;
3647         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3648                 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3649                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3650                 break;
3651         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3652                 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3653                 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3654                 break;
3655         default:
3656                 *cmd = "UNKNOWN";
3657                 *size = 0;
3658                 return false;
3659         }
3660
3661         return true;
3662 }
3663
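/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful output, the size of the command.
 *
 * Hands non-3D commands over to vmw_cmd_check_not_3d(). For 3D commands,
 * looks up the command's entry in vmw_cmd_entries[], checks privilege and
 * device-capability restrictions, and calls the entry's verifier function.
 */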
3664 static int vmw_cmd_check(struct vmw_private *dev_priv,
3665                          struct vmw_sw_context *sw_context, void *buf,
3666                          uint32_t *size)
3667 {
3668         uint32_t cmd_id;
3669         uint32_t size_remaining = *size;
3670         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3671         int ret;
3672         const struct vmw_cmd_entry *entry;
3673         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3674
3675         cmd_id = ((uint32_t *)buf)[0];
3676         /* Handle any non-3D commands. */
3677         if (unlikely(cmd_id < SVGA_CMD_MAX))
3678                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3679
3681         cmd_id = header->id;
3682         *size = header->size + sizeof(SVGA3dCmdHeader);
3683
3684         cmd_id -= SVGA_3D_CMD_BASE;
3685         if (unlikely(*size > size_remaining))
3686                 goto out_invalid;
3687
3688         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3689                 goto out_invalid;
3690
3691         entry = &vmw_cmd_entries[cmd_id];
3692         if (unlikely(!entry->func))
3693                 goto out_invalid;
3694
3695         if (unlikely(!entry->user_allow && !sw_context->kernel))
3696                 goto out_privileged;
3697
3698         if (unlikely(entry->gb_disable && gb))
3699                 goto out_old;
3700
3701         if (unlikely(entry->gb_enable && !gb))
3702                 goto out_new;
3703
3704         ret = entry->func(dev_priv, sw_context, header);
3705         if (unlikely(ret != 0)) {
3706                 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3707                                cmd_id + SVGA_3D_CMD_BASE, ret);
3708                 return ret;
3709         }
3710
3711         return 0;
3712 out_invalid:
3713         VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3714                        cmd_id + SVGA_3D_CMD_BASE);
3715         return -EINVAL;
3716 out_privileged:
3717         VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3718                        cmd_id + SVGA_3D_CMD_BASE);
3719         return -EPERM;
3720 out_old:
3721         VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3722                        cmd_id + SVGA_3D_CMD_BASE);
3723         return -EINVAL;
3724 out_new:
3725         VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3726                        cmd_id + SVGA_3D_CMD_BASE);
3727         return -EINVAL;
3728 }
3729
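/**
 * vmw_cmd_check_all - Validate all commands in a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Iterates over the batch, validating one command at a time.
 */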
3730 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3731                              struct vmw_sw_context *sw_context, void *buf,
3732                              uint32_t size)
3733 {
3734         int32_t cur_size = size;
3735         int ret;
3736
3737         sw_context->buf_start = buf;
3738
3739         while (cur_size > 0) {
3740                 size = cur_size;
3741                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3742                 if (unlikely(ret != 0))
3743                         return ret;
3744                 buf = (void *)((unsigned long) buf + size);
3745                 cur_size -= size;
3746         }
3747
3748         if (unlikely(cur_size != 0)) {
3749                 VMW_DEBUG_USER("Command verifier out of sync.\n");
3750                 return -EINVAL;
3751         }
3752
3753         return 0;
3754 }
3755
3756 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3757 {
3758         /* Memory is validation context memory, so no need to free it */
3759         INIT_LIST_HEAD(&sw_context->bo_relocations);
3760 }
3761
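/**
 * vmw_apply_relocations - Patch the command batch with buffer placements
 *
 * @sw_context: The software context holding the relocation list.
 *
 * Patches each recorded guest-pointer or mob-id relocation with the current
 * placement of its buffer object, then discards the relocation list.
 */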
3762 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3763 {
3764         struct vmw_relocation *reloc;
3765         struct ttm_buffer_object *bo;
3766
3767         list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3768                 bo = &reloc->vbo->tbo;
3769                 switch (bo->resource->mem_type) {
3770                 case TTM_PL_VRAM:
3771                         reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3772                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3773                         break;
3774                 case VMW_PL_GMR:
3775                         reloc->location->gmrId = bo->resource->start;
3776                         break;
3777                 case VMW_PL_MOB:
3778                         *reloc->mob_loc = bo->resource->start;
3779                         break;
3780                 default:
3781                         BUG();
3782                 }
3783         }
3784         vmw_free_relocations(sw_context);
3785 }
3786
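/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: The minimum required buffer size in bytes.
 *
 * The buffer grows by roughly fifty percent per iteration, page-aligned.
 * The old buffer contents are not preserved.
 */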
3787 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3788                                  uint32_t size)
3789 {
3790         if (likely(sw_context->cmd_bounce_size >= size))
3791                 return 0;
3792
3793         if (sw_context->cmd_bounce_size == 0)
3794                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3795
3796         while (sw_context->cmd_bounce_size < size) {
3797                 sw_context->cmd_bounce_size =
3798                         PAGE_ALIGN(sw_context->cmd_bounce_size +
3799                                    (sw_context->cmd_bounce_size >> 1));
3800         }
3801
3802         vfree(sw_context->cmd_bounce);
3803         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3804
3805         if (sw_context->cmd_bounce == NULL) {
3806                 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3807                 sw_context->cmd_bounce_size = 0;
3808                 return -ENOMEM;
3809         }
3810
3811         return 0;
3812 }
3813
3814 /*
3815  * vmw_execbuf_fence_commands - create and submit a command stream fence
3816  *
3817  * Creates a fence object and submits a command stream marker.
3818  * If this fails for some reason, we sync the fifo and set *@p_fence to
3819  * NULL. It is then safe to fence buffers with a NULL pointer.
3820  *
3821  * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
3822  * user-space fence handle is created; otherwise it is not.
3823  */
3824
3825 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3826                                struct vmw_private *dev_priv,
3827                                struct vmw_fence_obj **p_fence,
3828                                uint32_t *p_handle)
3829 {
3830         uint32_t sequence;
3831         int ret;
3832         bool synced = false;
3833
3834         /* p_handle implies file_priv. */
3835         BUG_ON(p_handle != NULL && file_priv == NULL);
3836
3837         ret = vmw_cmd_send_fence(dev_priv, &sequence);
3838         if (unlikely(ret != 0)) {
3839                 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3840                 synced = true;
3841         }
3842
3843         if (p_handle != NULL)
3844                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3845                                             sequence, p_fence, p_handle);
3846         else
3847                 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3848
3849         if (unlikely(ret != 0 && !synced)) {
3850                 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3851                                          false, VMW_FENCE_WAIT_TIMEOUT);
3852                 *p_fence = NULL;
3853         }
3854
3855         return ret;
3856 }
3857
3858 /**
3859  * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3860  *
3861  * @dev_priv: Pointer to a vmw_private struct.
3862  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3863  * @ret: Return value from fence object creation.
3864  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3865  * the information should be copied.
3866  * @fence: Pointer to the fence object.
3867  * @fence_handle: User-space fence handle.
3868  * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3869  *
3870  * This function copies fence information to user-space. If copying fails,
3871  * the user-space struct drm_vmw_fence_rep::error member should be left
3872  * untouched; user-space is expected to preload it with -EFAULT so that a
3873  * failed copy is detected.
3874  *
3875  * Also, if copying fails, user-space will be unable to signal the fence
3876  * object, so we wait for it immediately and then unreference the user-space reference.
3877  */
3878 int
3879 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3880                             struct vmw_fpriv *vmw_fp, int ret,
3881                             struct drm_vmw_fence_rep __user *user_fence_rep,
3882                             struct vmw_fence_obj *fence, uint32_t fence_handle,
3883                             int32_t out_fence_fd)
3884 {
3885         struct drm_vmw_fence_rep fence_rep;
3886
3887         if (user_fence_rep == NULL)
3888                 return 0;
3889
3890         memset(&fence_rep, 0, sizeof(fence_rep));
3891
3892         fence_rep.error = ret;
3893         fence_rep.fd = out_fence_fd;
3894         if (ret == 0) {
3895                 BUG_ON(fence == NULL);
3896
3897                 fence_rep.handle = fence_handle;
3898                 fence_rep.seqno = fence->base.seqno;
3899                 vmw_update_seqno(dev_priv);
3900                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3901         }
3902
3903         /*
3904          * copy_to_user errors will be detected by user space not seeing
3905          * fence_rep::error filled in. Typically user-space would have pre-set
3906          * that member to -EFAULT.
3907          */
3908         ret = copy_to_user(user_fence_rep, &fence_rep,
3909                            sizeof(fence_rep));
3910
3911         /*
3912          * User-space lost the fence object. We need to sync and unreference the
3913          * handle.
3914          */
3915         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3916                 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3917                 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3918                 (void) vmw_fence_obj_wait(fence, false, false,
3919                                           VMW_FENCE_WAIT_TIMEOUT);
3920         }
3921
3922         return ret ? -EFAULT : 0;
3923 }
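
/*
 * Illustrative user-space sketch (not part of this driver) of the -EFAULT
 * preload convention documented above. Struct, field and flag names follow
 * the vmwgfx UAPI; the ioctl request composition is an assumption based on
 * the usual DRM_COMMAND_BASE pattern, and handle_lost_fence() is a
 * hypothetical placeholder.
 *
 *        struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *        struct drm_vmw_execbuf_arg arg = {
 *                .commands = (__u64)(uintptr_t)cmds,
 *                .command_size = cmd_size,
 *                .fence_rep = (__u64)(uintptr_t)&rep,
 *                .version = DRM_VMW_EXECBUF_VERSION,
 *        };
 *
 *        ioctl(drm_fd, DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,
 *                               struct drm_vmw_execbuf_arg), &arg);
 *        if (rep.error == -EFAULT)
 *                handle_lost_fence();
 */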
3924
3925 /**
3926  * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3927  *
3928  * @dev_priv: Pointer to a device private structure.
3929  * @kernel_commands: Pointer to the unpatched command batch.
3930  * @command_size: Size of the unpatched command batch.
3931  * @sw_context: Structure holding the relocation lists.
3932  *
3933  * Side effects: If this function returns 0, then the command batch pointed to
3934  * by @kernel_commands will have been modified.
3935  */
3936 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3937                                    void *kernel_commands, u32 command_size,
3938                                    struct vmw_sw_context *sw_context)
3939 {
3940         void *cmd;
3941
3942         if (sw_context->dx_ctx_node)
3943                 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3944                                           sw_context->dx_ctx_node->ctx->id);
3945         else
3946                 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3947
3948         if (!cmd)
3949                 return -ENOMEM;
3950
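        /*
         * BO relocations are patched in place in the bounce buffer before it
         * is copied, while resource id relocations are patched directly in
         * the reserved fifo space after the copy.
         */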
3951         vmw_apply_relocations(sw_context);
3952         memcpy(cmd, kernel_commands, command_size);
3953         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3954         vmw_resource_relocations_free(&sw_context->res_relocations);
3955         vmw_cmd_commit(dev_priv, command_size);
3956
3957         return 0;
3958 }
3959
3960 /**
3961  * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3962  * command buffer manager.
3963  *
3964  * @dev_priv: Pointer to a device private structure.
3965  * @header: Opaque handle to the command buffer allocation.
3966  * @command_size: Size of the unpatched command batch.
3967  * @sw_context: Structure holding the relocation lists.
3968  *
3969  * Side effects: If this function returns 0, then the command buffer represented
3970  * by @header will have been modified.
3971  */
3972 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3973                                      struct vmw_cmdbuf_header *header,
3974                                      u32 command_size,
3975                                      struct vmw_sw_context *sw_context)
3976 {
3977         u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3978                   SVGA3D_INVALID_ID);
3979         void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3980                                        header);
3981
3982         vmw_apply_relocations(sw_context);
3983         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3984         vmw_resource_relocations_free(&sw_context->res_relocations);
3985         vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3986
3987         return 0;
3988 }
3989
3990 /**
3991  * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3992  * submission using a command buffer.
3993  *
3994  * @dev_priv: Pointer to a device private structure.
3995  * @user_commands: User-space pointer to the commands to be submitted.
3996  * @kernel_commands: Pointer to a kernel copy of the commands, or NULL. If
3997  * the command buffer manager cannot be used, the function returns the value
3998  * of @kernel_commands unchanged; that value may be NULL. In that case,
3999  * *@header is set to NULL.
4000  * @command_size: Size of the unpatched command batch.
4001  * @header: Out parameter returning the opaque pointer to the command buffer.
4002  *
4003  * This function checks whether we can use the command buffer manager for
4004  * submission and if so, creates a command buffer of suitable size and copies
4005  * the user data into that buffer.
4006  *
4007  * On successful return, the function returns a pointer to the data in the
4008  * command buffer and *@header is set to non-NULL.
4009  *
4010  * If an error is encountered, the function returns a pointer-encoded error
4011  * value. If the function is interrupted by a signal while sleeping, it
4012  * returns -ERESTARTSYS cast to a pointer error value.
4013  */
4014 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4015                                 void __user *user_commands,
4016                                 void *kernel_commands, u32 command_size,
4017                                 struct vmw_cmdbuf_header **header)
4018 {
4019         size_t cmdbuf_size;
4020         int ret;
4021
4022         *header = NULL;
4023         if (command_size > SVGA_CB_MAX_SIZE) {
4024                 VMW_DEBUG_USER("Command buffer is too large.\n");
4025                 return ERR_PTR(-EINVAL);
4026         }
4027
4028         if (!dev_priv->cman || kernel_commands)
4029                 return kernel_commands;
4030
4031         /* If possible, add a little space for fencing. */
4032         cmdbuf_size = command_size + 512;
4033         cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4034         kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4035                                            header);
4036         if (IS_ERR(kernel_commands))
4037                 return kernel_commands;
4038
4039         ret = copy_from_user(kernel_commands, user_commands, command_size);
4040         if (ret) {
4041                 VMW_DEBUG_USER("Failed copying commands.\n");
4042                 vmw_cmdbuf_header_free(*header);
4043                 *header = NULL;
4044                 return ERR_PTR(-EFAULT);
4045         }
4046
4047         return kernel_commands;
4048 }
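
/*
 * A minimal caller sketch of the contract above; the real caller,
 * vmw_execbuf_process() below, follows the same pattern:
 *
 *        cmds = vmw_execbuf_cmdbuf(dev_priv, ucmds, kcmds, size, &header);
 *        if (IS_ERR(cmds))
 *                return PTR_ERR(cmds);        (error; *header is NULL)
 *        if (header)
 *                submit via the command buffer manager;
 *        else
 *                fall back to the fifo bounce-buffer path;
 */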
4049
4050 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4051                                    struct vmw_sw_context *sw_context,
4052                                    uint32_t handle)
4053 {
4054         struct vmw_resource *res;
4055         int ret;
4056         unsigned int size;
4057
4058         if (handle == SVGA3D_INVALID_ID)
4059                 return 0;
4060
4061         size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4062         ret = vmw_validation_preload_res(sw_context->ctx, size);
4063         if (ret)
4064                 return ret;
4065
4066         ret = vmw_user_resource_lookup_handle
4067                 (dev_priv, sw_context->fp->tfile, handle,
4068                  user_context_converter, &res);
4069         if (ret != 0) {
4070                 VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4071                                (unsigned int) handle);
4072                 return ret;
4073         }
4074
4075         ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4076                                       vmw_val_add_flag_none);
4077         if (unlikely(ret != 0)) {
4078                 vmw_resource_unreference(&res);
4079                 return ret;
4080         }
4081
4082         sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4083         sw_context->man = vmw_context_res_man(res);
4084
4085         vmw_resource_unreference(&res);
4086         return 0;
4087 }
4088
4089 int vmw_execbuf_process(struct drm_file *file_priv,
4090                         struct vmw_private *dev_priv,
4091                         void __user *user_commands, void *kernel_commands,
4092                         uint32_t command_size, uint64_t throttle_us,
4093                         uint32_t dx_context_handle,
4094                         struct drm_vmw_fence_rep __user *user_fence_rep,
4095                         struct vmw_fence_obj **out_fence, uint32_t flags)
4096 {
4097         struct vmw_sw_context *sw_context = &dev_priv->ctx;
4098         struct vmw_fence_obj *fence = NULL;
4099         struct vmw_cmdbuf_header *header;
4100         uint32_t handle = 0;
4101         int ret;
4102         int32_t out_fence_fd = -1;
4103         struct sync_file *sync_file = NULL;
4104         DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4105
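        /*
         * When exporting a fence fd, reserve the fd slot up front. It only
         * becomes visible to user-space via fd_install() once the fence has
         * been exported and copied out; every failure path before that
         * releases it again with put_unused_fd().
         */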
4106         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4107                 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4108                 if (out_fence_fd < 0) {
4109                         VMW_DEBUG_USER("Failed to get a fence fd.\n");
4110                         return out_fence_fd;
4111                 }
4112         }
4113
4114         if (throttle_us) {
4115                 VMW_DEBUG_USER("Throttling is no longer supported.\n");
4116         }
4117
4118         kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4119                                              kernel_commands, command_size,
4120                                              &header);
4121         if (IS_ERR(kernel_commands)) {
4122                 ret = PTR_ERR(kernel_commands);
4123                 goto out_free_fence_fd;
4124         }
4125
4126         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4127         if (ret) {
4128                 ret = -ERESTARTSYS;
4129                 goto out_free_header;
4130         }
4131
4132         sw_context->kernel = false;
4133         if (kernel_commands == NULL) {
4134                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4135                 if (unlikely(ret != 0))
4136                         goto out_unlock;
4137
4138                 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4139                                      command_size);
4140                 if (unlikely(ret != 0)) {
4141                         ret = -EFAULT;
4142                         VMW_DEBUG_USER("Failed copying commands.\n");
4143                         goto out_unlock;
4144                 }
4145
4146                 kernel_commands = sw_context->cmd_bounce;
4147         } else if (!header) {
4148                 sw_context->kernel = true;
4149         }
4150
4151         sw_context->filp = file_priv;
4152         sw_context->fp = vmw_fpriv(file_priv);
4153         INIT_LIST_HEAD(&sw_context->ctx_list);
4154         sw_context->cur_query_bo = dev_priv->pinned_bo;
4155         sw_context->last_query_ctx = NULL;
4156         sw_context->needs_post_query_barrier = false;
4157         sw_context->dx_ctx_node = NULL;
4158         sw_context->dx_query_mob = NULL;
4159         sw_context->dx_query_ctx = NULL;
4160         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4161         INIT_LIST_HEAD(&sw_context->res_relocations);
4162         INIT_LIST_HEAD(&sw_context->bo_relocations);
4163
4164         if (sw_context->staged_bindings)
4165                 vmw_binding_state_reset(sw_context->staged_bindings);
4166
4167         INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4168         sw_context->ctx = &val_ctx;
4169         ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4170         if (unlikely(ret != 0))
4171                 goto out_err_nores;
4172
4173         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4174                                 command_size);
4175         if (unlikely(ret != 0))
4176                 goto out_err_nores;
4177
4178         ret = vmw_resources_reserve(sw_context);
4179         if (unlikely(ret != 0))
4180                 goto out_err_nores;
4181
4182         ret = vmw_validation_bo_reserve(&val_ctx, true);
4183         if (unlikely(ret != 0))
4184                 goto out_err_nores;
4185
4186         ret = vmw_validation_bo_validate(&val_ctx, true);
4187         if (unlikely(ret != 0))
4188                 goto out_err;
4189
4190         ret = vmw_validation_res_validate(&val_ctx, true);
4191         if (unlikely(ret != 0))
4192                 goto out_err;
4193
4194         vmw_validation_drop_ht(&val_ctx);
4195
4196         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4197         if (unlikely(ret != 0)) {
4198                 ret = -ERESTARTSYS;
4199                 goto out_err;
4200         }
4201
4202         if (dev_priv->has_mob) {
4203                 ret = vmw_rebind_contexts(sw_context);
4204                 if (unlikely(ret != 0))
4205                         goto out_unlock_binding;
4206         }
4207
4208         if (!header) {
4209                 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4210                                               command_size, sw_context);
4211         } else {
4212                 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4213                                                 sw_context);
4214                 header = NULL;
4215         }
4216         mutex_unlock(&dev_priv->binding_mutex);
4217         if (ret)
4218                 goto out_err;
4219
4220         vmw_query_bo_switch_commit(dev_priv, sw_context);
4221         ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4222                                          (user_fence_rep) ? &handle : NULL);
4223         /*
4224          * This error is harmless, because if fence submission fails,
4225          * vmw_execbuf_fence_commands() handles syncing. The error will be
4226          * propagated to user-space in the fence_rep structure.
4227          */
4228         if (ret != 0)
4229                 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4230
4231         vmw_execbuf_bindings_commit(sw_context, false);
4232         vmw_bind_dx_query_mob(sw_context);
4233         vmw_validation_res_unreserve(&val_ctx, false);
4234
4235         vmw_validation_bo_fence(sw_context->ctx, fence);
4236
4237         if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4238                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4239
4240         /*
4241          * If anything fails here, give up trying to export the fence and do a
4242          * sync since the user mode will not be able to sync the fence itself.
4243          * This ensures we are still functionally correct.
4244          */
4245         if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4246
4247                 sync_file = sync_file_create(&fence->base);
4248                 if (!sync_file) {
4249                         VMW_DEBUG_USER("Sync file create failed for fence\n");
4250                         put_unused_fd(out_fence_fd);
4251                         out_fence_fd = -1;
4252
4253                         (void) vmw_fence_obj_wait(fence, false, false,
4254                                                   VMW_FENCE_WAIT_TIMEOUT);
4255                 }
4256         }
4257
4258         ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4259                                     user_fence_rep, fence, handle, out_fence_fd);
4260
4261         if (sync_file) {
4262                 if (ret) {
4263                         /* usercopy of fence failed, put the file object */
4264                         fput(sync_file->file);
4265                         put_unused_fd(out_fence_fd);
4266                 } else {
4267                         /* Link the fence with the FD created earlier */
4268                         fd_install(out_fence_fd, sync_file->file);
4269                 }
4270         }
4271
4272         /* Don't unreference when handing fence out */
4273         if (unlikely(out_fence != NULL)) {
4274                 *out_fence = fence;
4275                 fence = NULL;
4276         } else if (likely(fence != NULL)) {
4277                 vmw_fence_obj_unreference(&fence);
4278         }
4279
4280         vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4281         mutex_unlock(&dev_priv->cmdbuf_mutex);
4282
4283         /*
4284          * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4285          * in resource destruction paths.
4286          */
4287         vmw_validation_unref_lists(&val_ctx);
4288
4289         return ret;
4290
4291 out_unlock_binding:
4292         mutex_unlock(&dev_priv->binding_mutex);
4293 out_err:
4294         vmw_validation_bo_backoff(&val_ctx);
4295 out_err_nores:
4296         vmw_execbuf_bindings_commit(sw_context, true);
4297         vmw_validation_res_unreserve(&val_ctx, true);
4298         vmw_resource_relocations_free(&sw_context->res_relocations);
4299         vmw_free_relocations(sw_context);
4300         if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4301                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4302 out_unlock:
4303         vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4304         vmw_validation_drop_ht(&val_ctx);
4305         WARN_ON(!list_empty(&sw_context->ctx_list));
4306         mutex_unlock(&dev_priv->cmdbuf_mutex);
4307
4308         /*
4309          * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4310          * in resource destruction paths.
4311          */
4312         vmw_validation_unref_lists(&val_ctx);
4313 out_free_header:
4314         if (header)
4315                 vmw_cmdbuf_header_free(header);
4316 out_free_fence_fd:
4317         if (out_fence_fd >= 0)
4318                 put_unused_fd(out_fence_fd);
4319
4320         return ret;
4321 }
4322
4323 /**
4324  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4325  *
4326  * @dev_priv: The device private structure.
4327  *
4328  * This function is called to idle the fifo and unpin the query buffer if the
4329  * normal way to do this hits an error, which should typically be extremely
4330  * rare.
4331  */
4332 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4333 {
4334         VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4335
4336         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4337         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4338         if (dev_priv->dummy_query_bo_pinned) {
4339                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4340                 dev_priv->dummy_query_bo_pinned = false;
4341         }
4342 }
4343
4344
4345 /**
4346  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4347  * bo.
4348  *
4349  * @dev_priv: The device private structure.
4350  * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4351  * query barrier that flushes all queries touching the current buffer pointed to
4352  * by @dev_priv->pinned_bo
4353  *
4354  * This function should be used to unpin the pinned query bo, or as a query
4355  * barrier when we need to make sure that all queries have finished before the
4356  * next fifo command. (For example on hardware context destruction, where the
4357  * hardware may otherwise leak unfinished queries).
4358  *
4359  * This function does not return any failure codes, but makes attempts to do safe
4360  * unpinning in case of errors.
4361  *
4362  * The function will synchronize on the previous query barrier, and will thus
4363  * not finish until that barrier has executed.
4364  *
4365  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4366  * calling this function.
4367  */
4368 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4369                                      struct vmw_fence_obj *fence)
4370 {
4371         int ret = 0;
4372         struct vmw_fence_obj *lfence = NULL;
4373         DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4374
4375         if (dev_priv->pinned_bo == NULL)
4376                 goto out_unlock;
4377
4378         vmw_bo_placement_set(dev_priv->pinned_bo,
4379                              VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4380                              VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4381         ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4382         if (ret)
4383                 goto out_no_reserve;
4384
4385         vmw_bo_placement_set(dev_priv->dummy_query_bo,
4386                              VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4387                              VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4388         ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4389         if (ret)
4390                 goto out_no_reserve;
4391
4392         ret = vmw_validation_bo_reserve(&val_ctx, false);
4393         if (ret)
4394                 goto out_no_reserve;
4395
4396         if (dev_priv->query_cid_valid) {
4397                 BUG_ON(fence != NULL);
4398                 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4399                 if (ret)
4400                         goto out_no_emit;
4401                 dev_priv->query_cid_valid = false;
4402         }
4403
4404         vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4405         if (dev_priv->dummy_query_bo_pinned) {
4406                 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4407                 dev_priv->dummy_query_bo_pinned = false;
4408         }
4409         if (fence == NULL) {
4410                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4411                                                   NULL);
4412                 fence = lfence;
4413         }
4414         vmw_validation_bo_fence(&val_ctx, fence);
4415         if (lfence != NULL)
4416                 vmw_fence_obj_unreference(&lfence);
4417
4418         vmw_validation_unref_lists(&val_ctx);
4419         vmw_bo_unreference(&dev_priv->pinned_bo);
4420
4421 out_unlock:
4422         return;
4423 out_no_emit:
4424         vmw_validation_bo_backoff(&val_ctx);
4425 out_no_reserve:
4426         vmw_validation_unref_lists(&val_ctx);
4427         vmw_execbuf_unpin_panic(dev_priv);
4428         vmw_bo_unreference(&dev_priv->pinned_bo);
4429 }
4430
4431 /**
4432  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4433  *
4434  * @dev_priv: The device private structure.
4435  *
4436  * This function should be used to unpin the pinned query bo, or as a query
4437  * barrier when we need to make sure that all queries have finished before the
4438  * next fifo command. (For example on hardware context destruction, where the
4439  * hardware may otherwise leak unfinished queries).
4440  *
4441  * This function does not return any failure codes, but makes attempts to do safe
4442  * unpinning in case of errors.
4443  *
4444  * The function will synchronize on the previous query barrier, and will thus
4445  * not finish until that barrier has executed.
4446  */
4447 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4448 {
4449         mutex_lock(&dev_priv->cmdbuf_mutex);
4450         if (dev_priv->query_cid_valid)
4451                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4452         mutex_unlock(&dev_priv->cmdbuf_mutex);
4453 }
4454
4455 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4456                       struct drm_file *file_priv)
4457 {
4458         struct vmw_private *dev_priv = vmw_priv(dev);
4459         struct drm_vmw_execbuf_arg *arg = data;
4460         int ret;
4461         struct dma_fence *in_fence = NULL;
4462
4463         MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4464         MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4465
4466         /*
4467          * Extend the ioctl argument while maintaining backwards compatibility:
4468          * We take different code paths depending on the value of arg->version.
4469          *
4470          * Note: The ioctl argument is extended and zeropadded by core DRM.
4471          */
4472         if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4473                      arg->version == 0)) {
4474                 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4475                 ret = -EINVAL;
4476                 goto mksstats_out;
4477         }
4478
4479         switch (arg->version) {
4480         case 1:
4481                 /* For v1, core DRM has extended + zeropadded the data. */
4482                 arg->context_handle = (uint32_t) -1;
4483                 break;
4484         case 2:
4485         default:
4486                 /* For v2 and later, core DRM has copied it correctly. */
4487                 break;
4488         }
4489
4490         /* If imported a fence FD from elsewhere, then wait on it */
4491         if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4492                 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4493
4494                 if (!in_fence) {
4495                         VMW_DEBUG_USER("Cannot get imported fence\n");
4496                         ret = -EINVAL;
4497                         goto mksstats_out;
4498                 }
4499
4500                 ret = dma_fence_wait(in_fence, true);
4501                 if (ret)
4502                         goto out;
4503         }
4504
4505         ret = vmw_execbuf_process(file_priv, dev_priv,
4506                                   (void __user *)(unsigned long)arg->commands,
4507                                   NULL, arg->command_size, arg->throttle_us,
4508                                   arg->context_handle,
4509                                   (void __user *)(unsigned long)arg->fence_rep,
4510                                   NULL, arg->flags);
4511
4512         if (unlikely(ret != 0))
4513                 goto out;
4514
4515         vmw_kms_cursor_post_execbuf(dev_priv);
4516
4517 out:
4518         if (in_fence)
4519                 dma_fence_put(in_fence);
4520
4521 mksstats_out:
4522         MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4523         return ret;
4524 }
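
/*
 * Illustrative user-space sketch (not part of this driver) of fence fd
 * interop with this ioctl. Flag and field names follow the vmwgfx UAPI;
 * submit_execbuf() is a hypothetical wrapper around the execbuf ioctl as
 * sketched earlier in this file.
 *
 *        arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
 *        submit_execbuf(drm_fd, &arg, &rep);
 *        (on success, rep.fd is a sync_file fd; poll() it or hand it to
 *         another driver as an in-fence)
 *
 *        arg.imported_fence_fd = rep.fd;
 *        arg.flags = DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
 *        submit_execbuf(drm_fd, &arg, &rep);
 *        (the kernel waits on the imported fence before processing the batch)
 */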