// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
        struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

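/**
 * vmw_context_cotables_unref - Drop a context's references on its cotables
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable lock, then drops the
 * reference outside of it, so that a concurrent reader never sees a
 * half-released cotable resource.
 */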
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}

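/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this kills all bindings and calls the
 * resource's destroy callback; for legacy contexts it emits an
 * SVGA_3D_CMD_CONTEXT_DESTROY command through the FIFO.
 */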
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;


        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}

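/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize the resource as a DX context.
 * @res: Pointer to the resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup buffer size, the command buffer resource manager,
 * the binding state and, for DX contexts, the cotables.
 */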
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (unlikely(IS_ERR(uctx->cotables[i]))) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}

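/**
 * vmw_context_init - Initialize a context resource
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the resource to initialize.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether the context should be a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices; otherwise
 * defines a legacy context directly through the FIFO.
 */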
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        res->hw_destroy = vmw_hw_context_destroy;
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}


/*
 * GB context.
 */

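/**
 * vmw_gb_context_create - Allocate a device id and define a guest-backed
 * context
 *
 * @res: Pointer to the context resource.
 */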
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

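/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */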
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }
        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

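/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback before unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, and fences the buffer.
 */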
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;


        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

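/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device
 *
 * @res: Pointer to the context resource.
 */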
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */

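/**
 * vmw_dx_context_create - Allocate a device id and define a DX context
 *
 * @res: Pointer to the context resource.
 */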
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

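/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */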
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and cotables from a
 * context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);
                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}

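/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context and query states before unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs all cotables and bindings, optionally reads back pending query
 * states and the context itself, and fences the backup buffer.
 */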
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;


        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

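/**
 * vmw_dx_context_destroy - Destroy a DX context on the device
 *
 * @res: Pointer to the context resource.
 */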
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * User-space context management:
 */

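/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context's base object
 *
 * @base: Pointer to the TTM base object.
 */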
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

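/**
 * vmw_user_context_free - Free a user context and its bookkeeping
 *
 * @res: Pointer to the embedded context resource.
 */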
static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a base-object reference
 *
 * @p_base: Pointer to the base object pointer, which is set to NULL.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

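/**
 * vmw_context_destroy_ioctl - Ioctl dropping a user-space context reference
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (struct drm_vmw_context_arg).
 * @file_priv: Pointer to the calling file private.
 */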
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

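/**
 * vmw_context_define - Create a context resource and its user-space handle
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (struct drm_vmw_context_arg).
 * @file_priv: Pointer to the calling file private.
 * @dx: Whether to create a DX context.
 */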
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_operation_ctx ttm_opt_ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        int ret;

        if (!dev_priv->has_dx && dx) {
                DRM_ERROR("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
                  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   &ttm_opt_ctx);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context"
                                  " creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.handle;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

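/**
 * vmw_context_define_ioctl - Ioctl defining a legacy context
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Pointer to the calling file private.
 */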
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

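/**
 * vmw_extended_context_define_ioctl - Ioctl defining a legacy or DX context
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (union drm_vmw_extended_context_arg).
 * @file_priv: Pointer to the calling file private.
 */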
int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}

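/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context
 *
 * @ctx: The context resource.
 */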
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}

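/**
 * vmw_context_cotable - Return a cotable of a context
 *
 * @ctx: The context resource.
 * @cotable_type: The cotable type.
 *
 * Returns an error pointer if @cotable_type is out of range.
 */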
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return container_of(ctx, struct vmw_user_context, res)->
                cotables[cotable_type];
}

/**
 * vmw_context_binding_state - Return a pointer to a context binding state
 * structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query - Set the query MOB for the context
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * If @mob is NULL, this function removes the association between the MOB
 * and the context.  This function assumes the binding_mutex is held.
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_buffer_object *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_bo_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}